Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/accessibility/speakup/Kconfig13
-rw-r--r--drivers/accessibility/speakup/Makefile2
-rw-r--r--drivers/accessibility/speakup/serialio.c8
-rw-r--r--drivers/accessibility/speakup/spk_priv.h1
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c7
-rw-r--r--drivers/accessibility/speakup/spk_types.h1
-rw-r--r--drivers/accessibility/speakup/synth.c2
-rw-r--r--drivers/acpi/acpi_apd.c19
-rw-r--r--drivers/acpi/nfit/core.c157
-rw-r--r--drivers/acpi/nfit/intel.c386
-rw-r--r--drivers/acpi/nfit/intel.h61
-rw-r--r--drivers/acpi/nfit/nfit.h38
-rw-r--r--drivers/acpi/osl.c23
-rw-r--r--drivers/ata/ahci_brcm.c2
-rw-r--r--drivers/ata/libahci_platform.c2
-rw-r--r--drivers/ata/libata-core.c16
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/pata_atp867x.c4
-rw-r--r--drivers/ata/pata_serverworks.c2
-rw-r--r--drivers/ata/sata_mv.c12
-rw-r--r--drivers/ata/sata_promise.c8
-rw-r--r--drivers/ata/sata_sx4.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c16
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/base/core.c12
-rw-r--r--drivers/base/firmware_loader/fallback.c4
-rw-r--r--drivers/base/power/main.c16
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c12
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c43
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/null_blk_main.c2
-rw-r--r--drivers/block/paride/pd.c4
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c37
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h19
-rw-r--r--drivers/block/rnbd/rnbd-srv.c33
-rw-r--r--drivers/block/rsxx/core.c2
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/block/virtio_blk.c31
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c5
-rw-r--r--drivers/bus/ti-sysc.c2
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/hw_random/ingenic-rng.c9
-rw-r--r--drivers/char/ipmi/kcs_bmc.c2
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/nvram.c2
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/actions/owl-s500.c89
-rw-r--r--drivers/clk/at91/Makefile1
-rw-r--r--drivers/clk/at91/at91rm9200.c3
-rw-r--r--drivers/clk/at91/at91sam9260.c3
-rw-r--r--drivers/clk/at91/at91sam9g45.c5
-rw-r--r--drivers/clk/at91/at91sam9n12.c7
-rw-r--r--drivers/clk/at91/at91sam9rl.c3
-rw-r--r--drivers/clk/at91/at91sam9x5.c7
-rw-r--r--drivers/clk/at91/clk-generated.c44
-rw-r--r--drivers/clk/at91/clk-main.c6
-rw-r--r--drivers/clk/at91/clk-master.c310
-rw-r--r--drivers/clk/at91/clk-peripheral.c111
-rw-r--r--drivers/clk/at91/clk-programmable.c11
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c547
-rw-r--r--drivers/clk/at91/clk-system.c4
-rw-r--r--drivers/clk/at91/clk-utmi.c103
-rw-r--r--drivers/clk/at91/dt-compat.c25
-rw-r--r--drivers/clk/at91/pmc.h43
-rw-r--r--drivers/clk/at91/sam9x60.c66
-rw-r--r--drivers/clk/at91/sama5d2.c41
-rw-r--r--drivers/clk/at91/sama5d3.c8
-rw-r--r--drivers/clk/at91/sama5d4.c7
-rw-r--r--drivers/clk/at91/sama7g5.c1059
-rw-r--r--drivers/clk/at91/sckc.c5
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c25
-rw-r--r--drivers/clk/bcm/clk-iproc-asiu.c4
-rw-r--r--drivers/clk/clk-pwm.c7
-rw-r--r--drivers/clk/clk-qoriq.c10
-rw-r--r--drivers/clk/clk-sparx5.c295
-rw-r--r--drivers/clk/clk-versaclock5.c82
-rw-r--r--drivers/clk/clk.c38
-rw-r--r--drivers/clk/davinci/pll.c2
-rw-r--r--drivers/clk/imx/clk-pllv3.c4
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c165
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c97
-rw-r--r--drivers/clk/ingenic/x1830-cgu.c13
-rw-r--r--drivers/clk/mmp/clk-pxa168.c1
-rw-r--r--drivers/clk/mmp/clk-pxa910.c1
-rw-r--r--drivers/clk/qcom/Kconfig25
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c70
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h15
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c14
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c4
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c26
-rw-r--r--drivers/clk/qcom/gdsc.c39
-rw-r--r--drivers/clk/qcom/gdsc.h2
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c27
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c27
-rw-r--r--drivers/clk/qcom/gpucc-sm8150.c320
-rw-r--r--drivers/clk/qcom/gpucc-sm8250.c348
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c476
-rw-r--r--drivers/clk/rockchip/clk-pll.c70
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c39
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c8
-rw-r--r--drivers/clk/sirf/clk-atlas6.c2
-rw-r--r--drivers/clk/tegra/clk-pll.c20
-rw-r--r--drivers/clk/x86/Makefile2
-rw-r--r--drivers/clk/x86/clk-cgu-pll.c2
-rw-r--r--drivers/clk/x86/clk-cgu.c32
-rw-r--r--drivers/clk/x86/clk-fch.c101
-rw-r--r--drivers/clk/x86/clk-st.c78
-rw-r--r--drivers/clocksource/Kconfig16
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/timer-cadence-ttc.c4
-rw-r--r--drivers/clocksource/timer-clint.c226
-rw-r--r--drivers/clocksource/timer-riscv.c17
-rw-r--r--drivers/clocksource/timer-stm32-lp.c221
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c245
-rw-r--r--drivers/cpufreq/p4-clockmod.c2
-rw-r--r--drivers/cpufreq/speedstep-lib.c2
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c10
-rw-r--r--drivers/cpufreq/ti-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/crypto/Kconfig3
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c2
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c4
-rw-r--r--drivers/crypto/chelsio/chcr_ktls.c4
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c7
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c6
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c12
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c46
-rw-r--r--drivers/dax/super.c19
-rw-r--r--drivers/dma-buf/dma-resv.c15
-rw-r--r--drivers/dma/amba-pl08x.c10
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/fsldma.h12
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/iop-adma.h12
-rw-r--r--drivers/dma/nbpfaxi.c2
-rw-r--r--drivers/dma/pl330.c10
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/ghes_edac.c10
-rw-r--r--drivers/edac/i7core_edac.c4
-rw-r--r--drivers/edac/ie31200_edac.c50
-rw-r--r--drivers/edac/pnd2_edac.c4
-rw-r--r--drivers/edac/sb_edac.c4
-rw-r--r--drivers/edac/skx_common.c4
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-iso.c2
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core-transaction.c4
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firmware/arm_sdei.c5
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c12
-rw-r--r--drivers/firmware/ti_sci.c155
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c6
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-eic-sprd.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c124
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c41
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h54
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c144
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c27
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c44
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h21
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_cmn.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c6
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c7
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c11
-rw-r--r--drivers/gpu/drm/drm_drv.c3
-rw-r--r--drivers/gpu/drm/drm_gem.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_plane.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c6
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c20
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c13
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.c2
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c40
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c36
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c2
-rw-r--r--drivers/gpu/drm/savage/savage_state.c10
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c13
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c11
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c33
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c3
-rw-r--r--drivers/hid/hid-lg-g15.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c2
-rw-r--r--drivers/hid/hid-microsoft.c3
-rw-r--r--drivers/hid/hid-rmi.c1
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/hid-wiimote-core.c2
-rw-r--r--drivers/hid/usbhid/hiddev.c1
-rw-r--r--drivers/hid/wacom_wac.c32
-rw-r--r--drivers/hsi/clients/ssi_protocol.c6
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c2
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hv_util.c65
-rw-r--r--drivers/hv/vmbus_drv.c4
-rw-r--r--drivers/hwmon/adt7462.c8
-rw-r--r--drivers/hwmon/applesmc.c31
-rw-r--r--drivers/hwmon/emc1403.c4
-rw-r--r--drivers/hwmon/f71882fg.c4
-rw-r--r--drivers/hwmon/gsc-hwmon.c1
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/nct7904.c4
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/hwmon/pmbus/isl68137.c7
-rw-r--r--drivers/hwmon/pwm-fan.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/hwspinlock/Kconfig10
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c70
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c2
-rw-r--r--drivers/hwtracing/intel_th/sth.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c4
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c8
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c6
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c4
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c69
-rw-r--r--drivers/i2c/busses/i2c-at91.h3
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c17
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c39
-rw-r--r--drivers/i2c/busses/i2c-emev2.c3
-rw-r--r--drivers/i2c/busses/i2c-fsi.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c19
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c86
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c9
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c1
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-pnx.c3
-rw-r--r--drivers/i2c/busses/i2c-rcar.c16
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c39
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c3
-rw-r--r--drivers/i2c/busses/i2c-sirf.c4
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c101
-rw-r--r--drivers/i2c/busses/i2c-viapro.c8
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c10
-rw-r--r--drivers/i2c/i2c-core-base.c160
-rw-r--r--drivers/i2c/i2c-core.h9
-rw-r--r--drivers/i2c/i2c-dev.c4
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c2
-rw-r--r--drivers/ide/hpt366.c6
-rw-r--r--drivers/ide/ide-cd.c4
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide-taskfile.c12
-rw-r--r--drivers/ide/sis5513.c2
-rw-r--r--drivers/idle/intel_idle.c16
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c2
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/dpot-dac.c5
-rw-r--r--drivers/iio/health/max30102.c4
-rw-r--r--drivers/iio/imu/adis.c6
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/light/si1145.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/infiniband/core/cm.c12
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/device.c2
-rw-r--r--drivers/infiniband/core/rw.c1
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c1
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c10
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c1
-rw-r--r--drivers/input/input-mt.c2
-rw-r--r--drivers/input/joystick/db9.c10
-rw-r--r--drivers/input/joystick/fsia6b.c4
-rw-r--r--drivers/input/joystick/gamecon.c11
-rw-r--r--drivers/input/joystick/sidewinder.c15
-rw-r--r--drivers/input/joystick/spaceball.c8
-rw-r--r--drivers/input/keyboard/adp5589-keys.c2
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c1
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/cm109.c8
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/iqs269a.c21
-rw-r--r--drivers/input/misc/pwm-vibra.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/appletouch.c2
-rw-r--r--drivers/input/mouse/cyapa_gen3.c4
-rw-r--r--drivers/input/mouse/cyapa_gen5.c2
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h20
-rw-r--r--drivers/input/mouse/elan_i2c_core.c195
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c165
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c35
-rw-r--r--drivers/input/mouse/elantech.c12
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/navpoint.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/sentelic.c4
-rw-r--r--drivers/input/mouse/sermouse.c4
-rw-r--r--drivers/input/serio/i8042-io.h2
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/serio/libps2.c2
-rw-r--r--drivers/input/sparse-keymap.c2
-rw-r--r--drivers/input/tablet/gtco.c6
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c58
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/exc3000.c248
-rw-r--r--drivers/input/touchscreen/iqs5xx.c2
-rw-r--r--drivers/input/touchscreen/max11801_ts.c1
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c2
-rw-r--r--drivers/iommu/Kconfig146
-rw-r--r--drivers/iommu/Makefile15
-rw-r--r--drivers/iommu/amd/Kconfig44
-rw-r--r--drivers/iommu/amd/Makefile4
-rw-r--r--drivers/iommu/amd/init.c15
-rw-r--r--drivers/iommu/amd/iommu.c31
-rw-r--r--drivers/iommu/amd/iommu_v2.c2
-rw-r--r--drivers/iommu/arm/Makefile2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c (renamed from drivers/iommu/arm-smmu-v3.c)15
-rw-r--r--drivers/iommu/arm/arm-smmu/Makefile4
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c (renamed from drivers/iommu/arm-smmu-impl.c)60
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c278
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c (renamed from drivers/iommu/arm-smmu-qcom.c)0
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c (renamed from drivers/iommu/arm-smmu.c)42
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h (renamed from drivers/iommu/arm-smmu.h)6
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c (renamed from drivers/iommu/qcom_iommu.c)66
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/exynos-iommu.c32
-rw-r--r--drivers/iommu/fsl_pamu.c5
-rw-r--r--drivers/iommu/fsl_pamu_domain.c8
-rw-r--r--drivers/iommu/intel/Kconfig87
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/debugfs.c2
-rw-r--r--drivers/iommu/intel/dmar.c26
-rw-r--r--drivers/iommu/intel/iommu.c142
-rw-r--r--drivers/iommu/intel/irq_remapping.c1
-rw-r--r--drivers/iommu/intel/pasid.c13
-rw-r--r--drivers/iommu/intel/pasid.h (renamed from drivers/iommu/intel/intel-pasid.h)2
-rw-r--r--drivers/iommu/intel/svm.c338
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c18
-rw-r--r--drivers/iommu/io-pgtable-arm.c21
-rw-r--r--drivers/iommu/iommu.c37
-rw-r--r--drivers/iommu/iova.c4
-rw-r--r--drivers/iommu/ipmmu-vmsa.c14
-rw-r--r--drivers/iommu/msm_iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.c112
-rw-r--r--drivers/iommu/mtk_iommu.h23
-rw-r--r--drivers/iommu/mtk_iommu_v1.c10
-rw-r--r--drivers/iommu/omap-iommu-debug.c3
-rw-r--r--drivers/iommu/omap-iommu.c22
-rw-r--r--drivers/iommu/rockchip-iommu.c8
-rw-r--r--drivers/iommu/tegra-gart.c8
-rw-r--r--drivers/iommu/tegra-smmu.c8
-rw-r--r--drivers/iommu/virtio-iommu.c36
-rw-r--r--drivers/irqchip/Kconfig2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/irqchip/irq-gic-v3.c8
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c2
-rw-r--r--drivers/irqchip/irq-ingenic.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c4
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c14
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c95
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c152
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irqchip.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c8
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi_8xx.h1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c2
-rw-r--r--drivers/isdn/hardware/mISDN/isdnhdlc.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c8
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/lightnvm/pblk-core.c2
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c2
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/util.c14
-rw-r--r--drivers/md/dm-cache-metadata.c8
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-mpath.c24
-rw-r--r--drivers/md/dm-thin-metadata.c10
-rw-r--r--drivers/md/dm-writecache.c12
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/md/md-autodetect.c4
-rw-r--r--drivers/md/md-bitmap.c2
-rw-r--r--drivers/md/md-cluster.c1
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c14
-rw-r--r--drivers/md/raid5.c13
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c36
-rw-r--r--drivers/media/dvb-core/dvb_net.c2
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c103
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c12
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c24
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/mt352.c2
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/or51132.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c2
-rw-r--r--drivers/media/dvb-frontends/zl10353.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c23
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c2
-rw-r--r--drivers/media/pci/ttpci/av7110_ipack.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/ttpci/budget.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h4
-rw-r--r--drivers/media/platform/sh_vou.c4
-rw-r--r--drivers/media/radio/radio-si476x.c3
-rw-r--r--drivers/media/radio/tea575x.c2
-rw-r--r--drivers/media/rc/bpf-lirc.c2
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c2
-rw-r--r--drivers/media/rc/ir-sony-decoder.c2
-rw-r--r--drivers/media/tuners/xc5000.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c36
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c12
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c2
-rw-r--r--drivers/memory/mtk-smi.c22
-rw-r--r--drivers/memory/omap-gpmc.c1
-rw-r--r--drivers/memstick/core/ms_block.c12
-rw-r--r--drivers/memstick/host/jmb38x_ms.c4
-rw-r--r--drivers/memstick/host/tifm_ms.c4
-rw-r--r--drivers/message/fusion/mptbase.c6
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/fusion/mptscsih.c4
-rw-r--r--drivers/mfd/Kconfig33
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab3100-core.c2
-rw-r--r--drivers/mfd/ab3100-otp.c20
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c19
-rw-r--r--drivers/mfd/arizona-core.c20
-rw-r--r--drivers/mfd/atmel-smc.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c4
-rw-r--r--drivers/mfd/da9063-core.c31
-rw-r--r--drivers/mfd/da9063-i2c.c271
-rw-r--r--drivers/mfd/db8500-prcmu.c10
-rw-r--r--drivers/mfd/dln2.c4
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c19
-rw-r--r--drivers/mfd/intel_soc_pmic_mrfld.c7
-rw-r--r--drivers/mfd/iqs62x.c6
-rw-r--r--drivers/mfd/kempld-core.c30
-rw-r--r--drivers/mfd/khadas-mcu.c142
-rw-r--r--drivers/mfd/lm3533-ctrlbank.c94
-rw-r--r--drivers/mfd/lp873x.c2
-rw-r--r--drivers/mfd/lp87565.c2
-rw-r--r--drivers/mfd/madera-core.c33
-rw-r--r--drivers/mfd/madera-i2c.c1
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/mfd-core.c115
-rw-r--r--drivers/mfd/motorola-cpcap.c23
-rw-r--r--drivers/mfd/mxs-lradc.c2
-rw-r--r--drivers/mfd/omap-usb-host.c10
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/rave-sp.c6
-rw-r--r--drivers/mfd/rn5t618.c46
-rw-r--r--drivers/mfd/si476x-cmd.c74
-rw-r--r--drivers/mfd/si476x-i2c.c7
-rw-r--r--drivers/mfd/sky81452.c2
-rw-r--r--drivers/mfd/smsc-ece1099.c87
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c82
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/syscon.c6
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tps65010.c5
-rw-r--r--drivers/mfd/tps65086.c2
-rw-r--r--drivers/mfd/tps65217.c6
-rw-r--r--drivers/mfd/tps65218.c6
-rw-r--r--drivers/mfd/tps6586x.c7
-rw-r--r--drivers/mfd/tps65912-core.c2
-rw-r--r--drivers/mfd/tps65912-i2c.c2
-rw-r--r--drivers/mfd/tps65912-spi.c2
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/wm831x-core.c4
-rw-r--r--drivers/mfd/wm8350-core.c4
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/misc/eeprom/at25.c10
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c16
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c8
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c8
-rw-r--r--drivers/misc/habanalabs/common/device.c9
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c9
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h7
-rw-r--r--drivers/misc/habanalabs/common/memory.c9
-rw-r--r--drivers/misc/habanalabs/common/mmu.c2
-rw-r--r--drivers/misc/habanalabs/common/pci.c8
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c7
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c92
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h3
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c8
-rw-r--r--drivers/misc/habanalabs/goya/goya.c31
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c8
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c40
-rw-r--r--drivers/misc/mic/scif/scif_api.c4
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/sgi-gru/grukservices.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c8
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c13
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c67
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c10
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c55
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c2
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c6
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c5
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/mux/adgs1408.c2
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c6
-rw-r--r--drivers/net/arcnet/com20020-isa.c12
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arcnet/com90xx.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c19
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c54
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/dsa/b53/b53_common.c4
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c2
-rw-r--r--drivers/net/dsa/mt7530.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/ocelot/Kconfig2
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c8
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c3
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c12
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c54
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c29
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c25
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c12
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c17
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c8
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100.c8
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c13
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c5
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.h8
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c14
-rw-r--r--drivers/net/ethernet/sfc/farch.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h4
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c4
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c4
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c28
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/skfp/cfm.c17
-rw-r--r--drivers/net/fddi/skfp/fplustm.c4
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c4
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c4
-rw-r--r--drivers/net/fddi/skfp/smt.c7
-rw-r--r--drivers/net/fjes/fjes_main.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/mkiss.c5
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ipa/ipa.h3
-rw-r--r--drivers/net/ipa/ipa_modem.c56
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c27
-rw-r--r--drivers/net/macvlan.c23
-rw-r--r--drivers/net/mii.c2
-rw-r--r--drivers/net/netdevsim/bus.c2
-rw-r--r--drivers/net/netdevsim/fib.c6
-rw-r--r--drivers/net/phy/adin.c4
-rw-r--r--drivers/net/phy/dp83640.c8
-rw-r--r--drivers/net/phy/fixed_phy.c4
-rw-r--r--drivers/net/phy/marvell10g.c18
-rw-r--r--drivers/net/phy/mscc/mscc_main.c4
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c4
-rw-r--r--drivers/net/phy/sfp-bus.c4
-rw-r--r--drivers/net/phy/sfp.c12
-rw-r--r--drivers/net/plip/plip.c26
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/usb/aqc111.c6
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/r8152.c8
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/usbnet.c6
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c15
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c3
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/wan/dlci.c3
-rw-r--r--drivers/net/wan/hdlc.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c17
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/x25_asy.c16
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c2
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h6
-rw-r--r--drivers/net/xen-netback/hash.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nfc/pn533/pn533.c4
-rw-r--r--drivers/nfc/st21nfca/dep.c2
-rw-r--r--drivers/nfc/trf7970a.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.h2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h2
-rw-r--r--drivers/ntb/ntb_transport.c4
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/bus.c16
-rw-r--r--drivers/nvdimm/core.c149
-rw-r--r--drivers/nvdimm/dimm_devs.c124
-rw-r--r--drivers/nvdimm/namespace_devs.c2
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvdimm/region_devs.c2
-rw-r--r--drivers/nvdimm/security.c13
-rw-r--r--drivers/nvdimm/virtio_pmem.c4
-rw-r--r--drivers/nvme/host/core.c98
-rw-r--r--drivers/nvme/host/fc.c6
-rw-r--r--drivers/nvme/host/multipath.c69
-rw-r--r--drivers/nvme/host/nvme.h31
-rw-r--r--drivers/nvme/host/pci.c19
-rw-r--r--drivers/nvme/host/rdma.c4
-rw-r--r--drivers/nvme/host/tcp.c5
-rw-r--r--drivers/nvme/target/configfs.c1
-rw-r--r--drivers/nvme/target/core.c8
-rw-r--r--drivers/nvme/target/fcloop.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c1
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/passthru.c25
-rw-r--r--drivers/nvme/target/rdma.c4
-rw-r--r--drivers/of/address.c21
-rw-r--r--drivers/opp/core.c19
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parport/ieee1284.c6
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c6
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c2
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c12
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c4
-rw-r--r--drivers/pci/p2pdma.c10
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c8
-rw-r--r--drivers/perf/arm-ccn.c2
-rw-r--r--drivers/perf/arm_spe_pmu.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c8
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c24
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c4
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c42
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c3
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c98
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c4
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c399
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c13
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c2
-rw-r--r--drivers/platform/x86/acer-wmi.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/mlx-platform.c106
-rw-r--r--drivers/platform/x86/surfacepro3_button.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c6
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/power/supply/ab8500_charger.c4
-rw-r--r--drivers/power/supply/ab8500_fg.c4
-rw-r--r--drivers/power/supply/abx500_chargalg.c26
-rw-r--r--drivers/power/supply/axp20x_usb_power.c2
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c2
-rw-r--r--drivers/power/supply/max8925_power.c2
-rw-r--r--drivers/power/supply/wm831x_power.c2
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/ps3/ps3av.c2
-rw-r--r--drivers/ps3/ps3av_cmd.c4
-rw-r--r--drivers/ptp/ptp_clockmatrix.c56
-rw-r--r--drivers/ptp/ptp_clockmatrix.h2
-rw-r--r--drivers/pwm/core.c14
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c12
-rw-r--r--drivers/pwm/pwm-bcm-kona.c2
-rw-r--r--drivers/pwm/pwm-clps711x.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c2
-rw-r--r--drivers/pwm/pwm-imx27.c2
-rw-r--r--drivers/pwm/pwm-iqs620a.c15
-rw-r--r--drivers/pwm/pwm-mediatek.c1
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c4
-rw-r--r--drivers/pwm/pwm-sifive.c2
-rw-r--r--drivers/pwm/pwm-stm32-lp.c2
-rw-r--r--drivers/pwm/pwm-sun4i.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c2
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/sysfs.c8
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c9
-rw-r--r--drivers/rapidio/rio-scan.c8
-rw-r--r--drivers/regulator/axp20x-regulator.c8
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/slg51000-regulator.c2
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig34
-rw-r--r--drivers/remoteproc/Makefile5
-rw-r--r--drivers/remoteproc/ingenic_rproc.c84
-rw-r--r--drivers/remoteproc/omap_remoteproc.c1
-rw-r--r--drivers/remoteproc/qcom_common.c133
-rw-r--r--drivers/remoteproc/qcom_common.h5
-rw-r--r--drivers/remoteproc/qcom_pil_info.c129
-rw-r--r--drivers/remoteproc/qcom_pil_info.h9
-rw-r--r--drivers/remoteproc/qcom_q6v5.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c16
-rw-r--r--drivers/remoteproc/qcom_q6v5_ipa_notify.c85
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c157
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c15
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c14
-rw-r--r--drivers/remoteproc/qcom_sysmon.c4
-rw-r--r--drivers/remoteproc/qcom_wcnss.c14
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c124
-rw-r--r--drivers/remoteproc/remoteproc_core.c457
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c325
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c90
-rw-r--r--drivers/remoteproc/remoteproc_internal.h42
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c17
-rw-r--r--drivers/remoteproc/stm32_rproc.c214
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c787
-rw-r--r--drivers/remoteproc/ti_sci_proc.h104
-rw-r--r--drivers/reset/reset-imx7.c14
-rw-r--r--drivers/rpmsg/qcom_glink_native.c4
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c63
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-cpcap.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1374.c262
-rw-r--r--drivers/rtc/rtc-goldfish.c1
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-max77686.c23
-rw-r--r--drivers/rtc/rtc-mcp795.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c144
-rw-r--r--drivers/rtc/rtc-pcf85063.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c2
-rw-r--r--drivers/rtc/rtc-pl031.c1
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/s390/cio/css.c5
-rw-r--r--drivers/s390/crypto/pkey_api.c4
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.c6
-rw-r--r--drivers/s390/net/qeth_core_main.c4
-rw-r--r--drivers/s390/net/qeth_ethtool.c6
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/FlashPoint.c9
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c8
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c40
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c28
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c10
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/arm/fas216.c12
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c6
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c14
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c6
-rw-r--r--drivers/scsi/bfa/bfa_svc.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c1
-rw-r--r--drivers/scsi/csiostor/csio_hw.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c1
-rw-r--r--drivers/scsi/csiostor/csio_wr.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxlflash/main.c28
-rw-r--r--drivers/scsi/cxlflash/superpipe.c10
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c4
-rw-r--r--drivers/scsi/esp_scsi.c4
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c10
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hpsa.c10
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c6
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c6
-rw-r--r--drivers/scsi/imm.c14
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/remote_device.c4
-rw-r--r--drivers/scsi/isci/remote_node_context.c6
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c12
-rw-r--r--drivers/scsi/libfc/fc_exch.c4
-rw-r--r--drivers/scsi/libfc/fc_fcp.c8
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/myrb.c8
-rw-r--r--drivers/scsi/ncr53c8xx.c14
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/ppa.c10
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicpti.c20
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_error.c28
-rw-r--r--drivers/scsi/scsi_ioctl.c4
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/scsi/sd.h11
-rw-r--r--drivers/scsi/sd_zbc.c93
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c17
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/scsi/st.c8
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c2
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c1
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c2
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c16
-rw-r--r--drivers/scsi/ufs/ufshcd.c39
-rw-r--r--drivers/scsi/ufs/ufshcd.h9
-rw-r--r--drivers/scsi/virtio_scsi.c6
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/xen-scsifront.c2
-rw-r--r--drivers/sh/clk/cpg.c21
-rw-r--r--drivers/soc/qcom/socinfo.c22
-rw-r--r--drivers/soc/tegra/pmc.c2
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-cpm.c4
-rw-r--r--drivers/spi/spi-sprd-adi.c2
-rw-r--r--drivers/spi/spi-stm32.c100
-rw-r--r--drivers/spi/spi.c21
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_mipscore.c2
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c2
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c2
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/media/usbvision/usbvision-i2c.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c4
-rw-r--r--drivers/target/target_core_pr.c4
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/khadas_mcu_fan.c162
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c8
-rw-r--r--drivers/thermal/qcom/tsens-v1.c4
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/switch.c2
-rw-r--r--drivers/thunderbolt/tunnel.c4
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/n_hdlc.c2
-rw-r--r--drivers/tty/n_r3964.c1
-rw-r--r--drivers/tty/serial/8250/8250_em.c2
-rw-r--r--drivers/tty/serial/8250/8250_exar.c24
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c6
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c16
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c11
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c8
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_ioctl.c2
-rw-r--r--drivers/tty/vt/vt.c11
-rw-r--r--drivers/tty/vt/vt_ioctl.c12
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c3
-rw-r--r--drivers/usb/class/cdc-acm.c22
-rw-r--r--drivers/usb/core/driver.c40
-rw-r--r--drivers/usb/core/generic.c5
-rw-r--r--drivers/usb/core/hcd-pci.c5
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/quirks.c7
-rw-r--r--drivers/usb/dwc3/core.c5
-rw-r--r--drivers/usb/dwc3/gadget.c107
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/function/f_ncm.c81
-rw-r--r--drivers/usb/gadget/function/f_tcm.c7
-rw-r--r--drivers/usb/gadget/u_f.h38
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/host/isp116x-hcd.c6
-rw-r--r--drivers/usb/host/ohci-exynos.c5
-rw-r--r--drivers/usb/host/pci-quirks.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-debugfs.c8
-rw-r--r--drivers/usb/host/xhci-hub.c21
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c19
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c4
-rw-r--r--drivers/usb/host/xhci.c5
-rw-r--r--drivers/usb/misc/lvstest.c2
-rw-r--r--drivers/usb/misc/yurex.c2
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/musb/musb_core.c13
-rw-r--r--drivers/usb/musb/musb_dsps.c6
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c4
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/musb/musb_virthub.c2
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/phy/phy-jz4770.c1
-rw-r--r--drivers/usb/storage/sddr55.c2
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c28
-rw-r--r--drivers/usb/typec/ucsi/displayport.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c103
-rw-r--r--drivers/usb/usbip/stub_dev.c6
-rw-r--r--drivers/vdpa/Kconfig19
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h8
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c40
-rw-r--r--drivers/vdpa/mlx5/Makefile4
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h91
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h168
-rw-r--r--drivers/vdpa/mlx5/core/mr.c486
-rw-r--r--drivers/vdpa/mlx5/core/resources.c284
-rw-r--r--drivers/vdpa/mlx5/net/main.c76
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c1974
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h24
-rw-r--r--drivers/vdpa/vdpa.c4
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c124
-rw-r--r--drivers/vfio/pci/vfio_pci.c56
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h2
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c120
-rw-r--r--drivers/vfio/vfio.c13
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c4
-rw-r--r--drivers/vfio/vfio_iommu_type1.c475
-rw-r--r--drivers/vhost/Kconfig1
-rw-r--r--drivers/vhost/iotlb.c4
-rw-r--r--drivers/vhost/net.c22
-rw-r--r--drivers/vhost/vdpa.c183
-rw-r--r--drivers/vhost/vhost.c39
-rw-r--r--drivers/vhost/vhost.h11
-rw-r--r--drivers/video/backlight/88pm860x_bl.c13
-rw-r--r--drivers/video/backlight/Kconfig15
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/adp5520_bl.c10
-rw-r--r--drivers/video/backlight/adp8860_bl.c12
-rw-r--r--drivers/video/backlight/adp8870_bl.c10
-rw-r--r--drivers/video/backlight/as3711_bl.c11
-rw-r--r--drivers/video/backlight/backlight.c206
-rw-r--r--drivers/video/backlight/bd6107.c7
-rw-r--r--drivers/video/backlight/corgi_lcd.c8
-rw-r--r--drivers/video/backlight/cr_bllcd.c26
-rw-r--r--drivers/video/backlight/da903x_bl.c13
-rw-r--r--drivers/video/backlight/ep93xx_bl.c8
-rw-r--r--drivers/video/backlight/generic_bl.c110
-rw-r--r--drivers/video/backlight/gpio_backlight.c17
-rw-r--r--drivers/video/backlight/hp680_bl.c6
-rw-r--r--drivers/video/backlight/ili922x.c8
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/kb3886_bl.c6
-rw-r--r--drivers/video/backlight/lcd.c1
-rw-r--r--drivers/video/backlight/led_bl.c7
-rw-r--r--drivers/video/backlight/lm3533_bl.c10
-rw-r--r--drivers/video/backlight/lm3630a_bl.c4
-rw-r--r--drivers/video/backlight/lms501kf03.c9
-rw-r--r--drivers/video/backlight/locomolcd.c6
-rw-r--r--drivers/video/backlight/lv5207lp.c7
-rw-r--r--drivers/video/backlight/max8925_bl.c13
-rw-r--r--drivers/video/backlight/ot200_bl.c162
-rw-r--r--drivers/video/backlight/pwm_bl.c10
-rw-r--r--drivers/video/backlight/qcom-wled.c15
-rw-r--r--drivers/video/backlight/sky81452-backlight.c52
-rw-r--r--drivers/video/backlight/tps65217_bl.c10
-rw-r--r--drivers/video/backlight/wm831x_bl.c13
-rw-r--r--drivers/video/fbdev/acornfb.c2
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c6
-rw-r--r--drivers/video/fbdev/cirrusfb.c4
-rw-r--r--drivers/video/fbdev/controlfb.c4
-rw-r--r--drivers/video/fbdev/core/fbcon.c25
-rw-r--r--drivers/video/fbdev/core/fbmem.c10
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c4
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c2
-rw-r--r--drivers/video/fbdev/mmp/fb/mmpfb.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_hw.c2
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap/lcdc.c4
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c4
-rw-r--r--drivers/video/fbdev/ps3fb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c4
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s3c-fb.c6
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/fbdev/sis/sis_main.c8
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/fbdev/tdfxfb.c2
-rw-r--r--drivers/video/fbdev/via/lcd.c2
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/virtio/virtio_balloon.c30
-rw-r--r--drivers/virtio/virtio_input.c32
-rw-r--r--drivers/virtio/virtio_mem.c30
-rw-r--r--drivers/virtio/virtio_pci_modern.c7
-rw-r--r--drivers/virtio/virtio_ring.c7
-rw-r--r--drivers/virtio/virtio_vdpa.c9
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/advantechwdt.c2
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/ar7_wdt.c3
-rw-r--r--drivers/watchdog/ath79_wdt.c2
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c2
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/dw_wdt.c439
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c54
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/ib700wdt.c2
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/m54xx_wdt.c2
-rw-r--r--drivers/watchdog/machzwd.c2
-rw-r--r--drivers/watchdog/mlx_wdt.c73
-rw-r--r--drivers/watchdog/mv64x60_wdt.c2
-rw-r--r--drivers/watchdog/nv_tco.c4
-rw-r--r--drivers/watchdog/nv_tco.h2
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c7
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rti_wdt.c114
-rw-r--r--drivers/watchdog/sa1100_wdt.c2
-rw-r--r--drivers/watchdog/sb_wdog.c2
-rw-r--r--drivers/watchdog/sbc60xxwdt.c2
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/scx200_wdt.c2
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c57
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/w83877f_wdt.c2
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c73
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/events/events_base.c16
-rw-r--r--drivers/xen/gntdev-dmabuf.c8
-rw-r--r--drivers/xen/pvcalls-front.c2
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c10
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
1587 files changed, 23177 insertions, 8138 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index c2b452af6806..9861302cc7db 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -290,7 +290,7 @@ static int vt_notifier_call(struct notifier_block *blk,
break;
case '\t':
c = ' ';
- /* Fallthrough */
+ fallthrough;
default:
if (c < 32)
/* Ignore other control sequences */
diff --git a/drivers/accessibility/speakup/Kconfig b/drivers/accessibility/speakup/Kconfig
index 0803c2013cf4..07ecbbde0384 100644
--- a/drivers/accessibility/speakup/Kconfig
+++ b/drivers/accessibility/speakup/Kconfig
@@ -42,6 +42,11 @@ config SPEAKUP
one of the listed synthesizers, you should say n.
if SPEAKUP
+
+config SPEAKUP_SERIALIO
+ def_bool y
+ depends on ISA || COMPILE_TEST
+
config SPEAKUP_SYNTH_ACNTSA
tristate "Accent SA synthesizer support"
help
@@ -52,7 +57,7 @@ config SPEAKUP_SYNTH_ACNTSA
config SPEAKUP_SYNTH_ACNTPC
tristate "Accent PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the accent pc
synthesizer. You can say y to build it into the kernel,
@@ -104,7 +109,7 @@ config SPEAKUP_SYNTH_DECEXT
config SPEAKUP_SYNTH_DECPC
depends on m
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
tristate "DECtalk PC (big ISA card) synthesizer support"
help
@@ -127,7 +132,7 @@ config SPEAKUP_SYNTH_DECPC
config SPEAKUP_SYNTH_DTLK
tristate "DoubleTalk PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the internal DoubleTalk
@@ -138,7 +143,7 @@ config SPEAKUP_SYNTH_DTLK
config SPEAKUP_SYNTH_KEYPC
tristate "Keynote Gold PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the Keynote Gold
diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile
index 5befb4933b85..6e4bfac8af65 100644
--- a/drivers/accessibility/speakup/Makefile
+++ b/drivers/accessibility/speakup/Makefile
@@ -25,8 +25,8 @@ speakup-y := \
keyhelp.o \
kobjects.o \
selection.o \
- serialio.o \
spk_ttyio.o \
synth.o \
thread.o \
varhandlers.o
+speakup-$(CONFIG_SPEAKUP_SERIALIO) += serialio.o
diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c
index 177a2988641c..403b01d66367 100644
--- a/drivers/accessibility/speakup/serialio.c
+++ b/drivers/accessibility/speakup/serialio.c
@@ -32,6 +32,7 @@ static void spk_serial_tiocmset(unsigned int set, unsigned int clear);
static unsigned char spk_serial_in(void);
static unsigned char spk_serial_in_nowait(void);
static void spk_serial_flush_buffer(void);
+static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_serial_io_ops = {
.synth_out = spk_serial_out,
@@ -40,6 +41,7 @@ struct spk_io_ops spk_serial_io_ops = {
.synth_in = spk_serial_in,
.synth_in_nowait = spk_serial_in_nowait,
.flush_buffer = spk_serial_flush_buffer,
+ .wait_for_xmitr = spk_serial_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_serial_io_ops);
@@ -211,7 +213,7 @@ void spk_stop_serial_interrupt(void)
}
EXPORT_SYMBOL_GPL(spk_stop_serial_interrupt);
-int spk_wait_for_xmitr(struct spk_synth *in_synth)
+static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth)
{
int tmout = SPK_XMITR_TIMEOUT;
@@ -280,7 +282,7 @@ static void spk_serial_flush_buffer(void)
static int spk_serial_out(struct spk_synth *in_synth, const char ch)
{
- if (in_synth->alive && spk_wait_for_xmitr(in_synth)) {
+ if (in_synth->alive && spk_serial_wait_for_xmitr(in_synth)) {
outb_p(ch, speakup_info.port_tts);
return 1;
}
@@ -295,7 +297,7 @@ const char *spk_serial_synth_immediate(struct spk_synth *synth,
while ((ch = *buff)) {
if (ch == '\n')
ch = synth->procspeech;
- if (spk_wait_for_xmitr(synth))
+ if (spk_serial_wait_for_xmitr(synth))
outb(ch, speakup_info.port_tts);
else
return buff;
diff --git a/drivers/accessibility/speakup/spk_priv.h b/drivers/accessibility/speakup/spk_priv.h
index c75b40838794..0f4bcbe5ddb9 100644
--- a/drivers/accessibility/speakup/spk_priv.h
+++ b/drivers/accessibility/speakup/spk_priv.h
@@ -34,7 +34,6 @@
const struct old_serial_port *spk_serial_init(int index);
void spk_stop_serial_interrupt(void);
-int spk_wait_for_xmitr(struct spk_synth *in_synth);
void spk_serial_release(void);
void spk_ttyio_release(void);
void spk_ttyio_register_ldisc(void);
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 9b95f77f9265..a831ff64f8ba 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -116,6 +116,7 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear);
static unsigned char spk_ttyio_in(void);
static unsigned char spk_ttyio_in_nowait(void);
static void spk_ttyio_flush_buffer(void);
+static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
.synth_out = spk_ttyio_out,
@@ -125,6 +126,7 @@ struct spk_io_ops spk_ttyio_ops = {
.synth_in = spk_ttyio_in,
.synth_in_nowait = spk_ttyio_in_nowait,
.flush_buffer = spk_ttyio_flush_buffer,
+ .wait_for_xmitr = spk_ttyio_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_ttyio_ops);
@@ -286,6 +288,11 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
mutex_unlock(&speakup_tty_mutex);
}
+static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
+{
+ return 1;
+}
+
static unsigned char ttyio_in(int timeout)
{
struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index d3272c6d199a..7398f1196e10 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -158,6 +158,7 @@ struct spk_io_ops {
unsigned char (*synth_in)(void);
unsigned char (*synth_in_nowait)(void);
void (*flush_buffer)(void);
+ int (*wait_for_xmitr)(struct spk_synth *synth);
};
struct spk_synth {
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index 3568bfb89912..ac47dbac7207 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -159,7 +159,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
{
if (synth->alive)
return 1;
- if (spk_wait_for_xmitr(synth) > 0) {
+ if (synth->io_ops->wait_for_xmitr(synth) > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
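The speakup hunks above replace the removed global spk_wait_for_xmitr() with a per-backend .wait_for_xmitr callback in struct spk_io_ops, which synth.c now reaches through synth->io_ops. A minimal sketch of what a further, hypothetical backend would provide is shown below; the spk_dummyio_* names and the trivial poll are invented for illustration, only the op wiring mirrors this diff.

	/* hypothetical backend: illustrates only the new wait_for_xmitr op */
	static int spk_dummyio_wait_for_xmitr(struct spk_synth *in_synth)
	{
		/* a real backend polls its transmitter here, cf. serialio.c */
		return in_synth->alive;
	}

	static struct spk_io_ops spk_dummyio_ops = {
		/* .synth_out, .synth_in, .flush_buffer, ... as in the other backends */
		.wait_for_xmitr = spk_dummyio_wait_for_xmitr,
	};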
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ba2612e9a0eb..806b8ce05624 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -8,7 +8,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/platform_data/clk-st.h>
+#include <linux/platform_data/clk-fch.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/clkdev.h>
@@ -79,11 +79,12 @@ static int misc_check_res(struct acpi_resource *ares, void *data)
return !acpi_dev_resource_memory(ares, &res);
}
-static int st_misc_setup(struct apd_private_data *pdata)
+static int fch_misc_setup(struct apd_private_data *pdata)
{
struct acpi_device *adev = pdata->adev;
+ const union acpi_object *obj;
struct platform_device *clkdev;
- struct st_clk_data *clk_data;
+ struct fch_clk_data *clk_data;
struct resource_entry *rentry;
struct list_head resource_list;
int ret;
@@ -98,6 +99,9 @@ static int st_misc_setup(struct apd_private_data *pdata)
if (ret < 0)
return -ENOENT;
+ if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj))
+ clk_data->is_rv = obj->integer.value;
+
list_for_each_entry(rentry, &resource_list, node) {
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
resource_size(rentry->res));
@@ -106,7 +110,7 @@ static int st_misc_setup(struct apd_private_data *pdata)
acpi_dev_free_resource_list(&resource_list);
- clkdev = platform_device_register_data(&adev->dev, "clk-st",
+ clkdev = platform_device_register_data(&adev->dev, "clk-fch",
PLATFORM_DEVID_NONE, clk_data,
sizeof(*clk_data));
return PTR_ERR_OR_ZERO(clkdev);
@@ -135,8 +139,8 @@ static const struct apd_device_desc cz_uart_desc = {
.properties = uart_properties,
};
-static const struct apd_device_desc st_misc_desc = {
- .setup = st_misc_setup,
+static const struct apd_device_desc fch_misc_desc = {
+ .setup = fch_misc_setup,
};
#endif
@@ -239,7 +243,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
- { "AMD0040", APD_ADDR(st_misc_desc)},
+ { "AMD0040", APD_ADDR(fch_misc_desc)},
+ { "HYGO0010", APD_ADDR(wt_i2c_desc) },
#endif
#ifdef CONFIG_ARM64
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7c138a4edc03..26dd208a0d63 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -73,6 +73,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
}
EXPORT_SYMBOL(to_nfit_uuid);
+static const guid_t *to_nfit_bus_uuid(int family)
+{
+ if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
+ "only secondary bus families can be translated\n"))
+ return NULL;
+ /*
+ * The index of bus UUIDs starts immediately following the last
+ * NVDIMM/leaf family.
+ */
+ return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
+}
+
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -362,24 +374,8 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
[NVDIMM_FAMILY_INTEL] = {
- [NVDIMM_INTEL_GET_MODES] = 2,
- [NVDIMM_INTEL_GET_FWINFO] = 2,
- [NVDIMM_INTEL_START_FWUPDATE] = 2,
- [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
- [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
- [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
- [NVDIMM_INTEL_SET_THRESHOLD] = 2,
- [NVDIMM_INTEL_INJECT_ERROR] = 2,
- [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
- [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
- [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
- [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
- [NVDIMM_INTEL_FREEZE_LOCK] = 2,
- [NVDIMM_INTEL_SECURE_ERASE] = 2,
- [NVDIMM_INTEL_OVERWRITE] = 2,
- [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
- [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
- [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
+ [NVDIMM_INTEL_GET_MODES ...
+ NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
},
};
u8 id;
@@ -406,7 +402,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
}
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
- struct nd_cmd_pkg *call_pkg)
+ struct nd_cmd_pkg *call_pkg, int *family)
{
if (call_pkg) {
int i;
@@ -417,6 +413,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
if (call_pkg->nd_reserved2[i])
return -EINVAL;
+ *family = call_pkg->nd_family;
return call_pkg->nd_command;
}
@@ -450,13 +447,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
acpi_handle handle;
const guid_t *guid;
int func, rc, i;
+ int family = 0;
if (cmd_rc)
*cmd_rc = -EINVAL;
if (cmd == ND_CMD_CALL)
call_pkg = buf;
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
@@ -478,9 +476,17 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
- dsm_mask = nd_desc->bus_dsm_mask;
+ if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+ family = call_pkg->nd_family;
+ if (!test_bit(family, &nd_desc->bus_family_mask))
+ return -EINVAL;
+ dsm_mask = acpi_desc->family_dsm_mask[family];
+ guid = to_nfit_bus_uuid(family);
+ } else {
+ dsm_mask = acpi_desc->bus_dsm_mask;
+ guid = to_nfit_uuid(NFIT_DEV_BUS);
+ }
desc = nd_cmd_bus_desc(cmd);
- guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
dimm_name = "bus";
}
@@ -516,8 +522,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.length = call_pkg->nd_size_in;
}
- dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
- dimm_name, cmd, func, in_buf.buffer.length);
+ dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
+ dimm_name, cmd, family, func, in_buf.buffer.length);
if (payload_dumpable(nvdimm, func))
print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
in_buf.buffer.pointer,
@@ -1238,8 +1244,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev,
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+ return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1385,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
- if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
- return 0;
+ if (a == &dev_attr_scrub.attr)
+ return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+ if (a == &dev_attr_firmware_activate_noidle.attr)
+ return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
+
return a->mode;
}
@@ -1395,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr,
&dev_attr_bus_dsm_mask.attr,
+ &dev_attr_firmware_activate_noidle.attr,
NULL,
};
@@ -1823,6 +1835,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem)
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
unsigned long dsm_mask, label_mask;
@@ -1834,6 +1847,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/* nfit test assumes 1:1 relationship between commands and dsms */
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
+ set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
@@ -1886,10 +1900,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
* Note, that checking for function0 (bit0) tells us if any commands
* are reachable through this GUID.
*/
+ clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
- if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
+ set_bit(i, &nd_desc->dimm_family_mask);
if (family < 0 || i == default_dsm_family)
family = i;
+ }
/* limit the supported commands to those that are publicly documented */
nfit_mem->family = family;
@@ -2007,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
}
}
+static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
+ struct nfit_mem *nfit_mem)
+{
+ unsigned long mask;
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+ if (!nd_desc->fw_ops)
+ return NULL;
+
+ if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
+ return NULL;
+
+ mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
+ if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+ return NULL;
+
+ return intel_fw_ops;
+}
+
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_mem *nfit_mem;
@@ -2083,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
acpi_nfit_dimm_attribute_groups,
flags, cmd_mask, flush ? flush->hint_count : 0,
nfit_mem->flush_wpq, &nfit_mem->id[0],
- acpi_nfit_get_security_ops(nfit_mem->family));
+ acpi_nfit_get_security_ops(nfit_mem->family),
+ acpi_nfit_get_fw_ops(nfit_mem));
if (!nvdimm)
return -ENOMEM;
@@ -2147,12 +2185,23 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
+ unsigned long dsm_mask, *mask;
struct acpi_device *adev;
- unsigned long dsm_mask;
int i;
- nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
- nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
+ set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+ set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
+
+ /* enable nfit_test to inject bus command emulation */
+ if (acpi_desc->bus_cmd_force_en) {
+ nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ mask = &nd_desc->bus_family_mask;
+ if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
+ }
+
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -2160,7 +2209,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, &nd_desc->cmd_mask);
- set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
dsm_mask =
(1 << ND_CMD_ARS_CAP) |
@@ -2173,7 +2221,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
(1 << NFIT_CMD_ARS_INJECT_GET);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
- set_bit(i, &nd_desc->bus_dsm_mask);
+ set_bit(i, &acpi_desc->bus_dsm_mask);
+
+ /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
+ dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+ guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+ set_bit(i, mask);
+
+ if (*mask == dsm_mask) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
}
static ssize_t range_index_show(struct device *dev,
@@ -3273,7 +3334,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc;
+ int rc, do_sched_ars = 0;
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
@@ -3285,7 +3346,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
}
}
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3293,6 +3354,13 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
+
+ /*
+ * Kick off background ARS if at least one
+ * region successfully registered ARS
+ */
+ if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ do_sched_ars++;
break;
case NFIT_SPA_BDW:
/* nothing to register */
@@ -3311,8 +3379,10 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
/* don't register unknown regions */
break;
}
+ }
- sched_ars(acpi_desc);
+ if (do_sched_ars)
+ sched_ars(acpi_desc);
return 0;
}
@@ -3485,7 +3555,10 @@ static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
return 0;
}
-/* prevent security commands from being issued via ioctl */
+/*
+ * Prevent security and firmware activate commands from being issued via
+ * ioctl.
+ */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
@@ -3496,10 +3569,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
func = call_pkg->nd_command;
if (func > NVDIMM_CMD_MAX ||
- (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
return -EOPNOTSUPP;
}
+ /* block all non-nfit bus commands */
+ if (!nvdimm && cmd == ND_CMD_CALL &&
+ call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
+ return -EOPNOTSUPP;
+
return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}
@@ -3791,6 +3869,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
+ guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
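The acpi_nfit_ctl() changes above dispatch ND_CMD_CALL packages by bus family: call_pkg->nd_family now selects between the NFIT bus UUID and a secondary family UUID via to_nfit_bus_uuid(), gated by nd_desc->bus_family_mask. As a hedged sketch of the package a caller builds for the new Intel bus family (the surrounding ioctl plumbing is assumed, not shown; the intel.c change that follows constructs exactly this shape in intel_bus_fwa_businfo()):

	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate_businfo info;
	} cmd = {
		.pkg = {
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_size_out = sizeof(struct nd_intel_bus_fw_activate_businfo),
		},
	};
	/* acpi_nfit_ctl() validates pkg.nd_family against nd_desc->bus_family_mask,
	 * then evaluates the _DSM behind to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL). */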
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 1113b679cd7b..8dd792a55730 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -7,6 +7,48 @@
#include "intel.h"
#include "nfit.h"
+static ssize_t firmware_activate_noidle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
+}
+
+static ssize_t firmware_activate_noidle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ ssize_t rc;
+ bool val;
+
+ rc = kstrtobool(buf, &val);
+ if (rc)
+ return rc;
+ if (val != acpi_desc->fwa_noidle)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
+ acpi_desc->fwa_noidle = val;
+ return size;
+}
+DEVICE_ATTR_RW(firmware_activate_noidle);
+
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ unsigned long *mask;
+
+ if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
+ return false;
+
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+}
+
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type ptype)
{
@@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = {
};
const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
+
+static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
+ struct nd_intel_bus_fw_activate_businfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate_businfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ },
+ };
+ int rc;
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
+static enum nvdimm_fwa_state intel_bus_fwa_state(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct nd_intel_bus_fw_activate_businfo info;
+ struct device *dev = acpi_desc->dev;
+ enum nvdimm_fwa_state state;
+ int rc;
+
+ /*
+ * It should not be possible for platform firmware to return
+ * busy because activate is a synchronous operation. Treat it
+ * similar to invalid, i.e. always refresh / poll the status.
+ */
+ switch (acpi_desc->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* check if capability needs to be refreshed */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
+ break;
+ return acpi_desc->fwa_state;
+ }
+
+ /* Refresh with platform firmware */
+ rc = intel_bus_fwa_businfo(nd_desc, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ if (info.activate_tmo > info.max_quiesce_tmo)
+ state = NVDIMM_FWA_ARM_OVERFLOW;
+ else
+ state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ dev_err_once(dev, "invalid firmware activate state %d\n",
+ info.state);
+ return NVDIMM_FWA_INVALID;
+ }
+
+ /*
+ * Capability data is available in the same payload as state. It
+ * is expected to be static.
+ */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
+ if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
+ else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
+ /*
+ * Skip hibernate cycle by default if platform
+ * indicates that it does not need devices to be
+ * quiesced.
+ */
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
+ } else
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
+ }
+
+ acpi_desc->fwa_state = state;
+
+ return state;
+}
+
+static enum nvdimm_fwa_capability intel_bus_fwa_capability(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
+ return acpi_desc->fwa_cap;
+
+ if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
+ return acpi_desc->fwa_cap;
+
+ return NVDIMM_FWA_CAP_INVALID;
+}
+
+static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ },
+ /*
+ * Even though activate is run from a suspended context,
+ * for safety, still ask platform firmware to force
+ * quiesce devices by default. Let a module
+ * parameter override that policy.
+ */
+ .cmd = {
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
+ },
+ };
+ int rc;
+
+ switch (intel_bus_fwa_state(nd_desc)) {
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+
+ /*
+ * Whether the command succeeded, or failed, the agent checking
+ * for the result needs to query the DIMMs individually.
+ * Increment the activation count to invalidate all the DIMM
+ * states at once (it's otherwise not possible to take
+ * acpi_desc->init_mutex in this context)
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ acpi_desc->fwa_count++;
+
+ dev_dbg(acpi_desc->dev, "result: %d\n", rc);
+
+ return rc;
+}
+
+static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
+ .activate_state = intel_bus_fwa_state,
+ .capability = intel_bus_fwa_capability,
+ .activate = intel_bus_fwa_activate,
+};
+
+const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
+
+static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
+ struct nd_intel_fw_activate_dimminfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_dimminfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ },
+ };
+ int rc;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nd_intel_fw_activate_dimminfo info;
+ int rc;
+
+ /*
+ * Similar to the bus state, since activate is synchronous the
+ * busy state should resolve within the context of 'activate'.
+ */
+ switch (nfit_mem->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* If no activations occurred the old state is still valid */
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count)
+ return nfit_mem->fwa_state;
+ }
+
+ rc = intel_fwa_dimminfo(nvdimm, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+ break;
+ }
+
+ switch (info.result) {
+ case ND_INTEL_DIMM_FWA_NONE:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
+ break;
+ case ND_INTEL_DIMM_FWA_SUCCESS:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
+ break;
+ case ND_INTEL_DIMM_FWA_NOTSTAGED:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
+ break;
+ case ND_INTEL_DIMM_FWA_NEEDRESET:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
+ break;
+ case ND_INTEL_DIMM_FWA_MEDIAFAILED:
+ case ND_INTEL_DIMM_FWA_ABORT:
+ case ND_INTEL_DIMM_FWA_NOTSUPP:
+ case ND_INTEL_DIMM_FWA_ERROR:
+ default:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
+ break;
+ }
+
+ nfit_mem->fwa_count = acpi_desc->fwa_count;
+
+ return nfit_mem->fwa_state;
+}
+
+static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count
+ && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
+ return nfit_mem->fwa_result;
+
+ if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
+ return nfit_mem->fwa_result;
+
+ return NVDIMM_FWA_RESULT_INVALID;
+}
+
+static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_arm cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_arm),
+ },
+ .cmd = {
+ .activate_arm = arm == NVDIMM_FWA_ARM
+ ? ND_INTEL_DIMM_FWA_ARM
+ : ND_INTEL_DIMM_FWA_DISARM,
+ },
+ };
+ int rc;
+
+ switch (intel_fwa_state(nvdimm)) {
+ case NVDIMM_FWA_INVALID:
+ return -ENXIO;
+ case NVDIMM_FWA_BUSY:
+ return -EBUSY;
+ case NVDIMM_FWA_IDLE:
+ if (arm == NVDIMM_FWA_DISARM)
+ return 0;
+ break;
+ case NVDIMM_FWA_ARMED:
+ if (arm == NVDIMM_FWA_ARM)
+ return 0;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ /*
+ * Invalidate the bus-level state, now that we're committed to
+ * changing the 'arm' state.
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+
+ dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
+ ? "arm" : "disarm", rc);
+ return rc;
+}
+
+static const struct nvdimm_fw_ops __intel_fw_ops = {
+ .activate_state = intel_fwa_state,
+ .activate_result = intel_fwa_result,
+ .arm = intel_fwa_arm,
+};
+
+const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
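The bus-level ops table just above exposes a capability/state/activate triple. The sketch below strings the callbacks together in the order the helpers imply, for a caller that, per the comment above, already holds nvdimm_bus_lock(); try_bus_activate() is a made-up name, not the actual libnvdimm entry point.

	/* sketch only: drives the bus-level fw_ops in their intended order */
	static int try_bus_activate(struct nvdimm_bus_descriptor *nd_desc)
	{
		const struct nvdimm_bus_fw_ops *ops = nd_desc->fw_ops;

		if (!ops || ops->capability(nd_desc) == NVDIMM_FWA_CAP_INVALID)
			return -EOPNOTSUPP;

		switch (ops->activate_state(nd_desc)) {
		case NVDIMM_FWA_ARMED:
		case NVDIMM_FWA_ARM_OVERFLOW:
			return ops->activate(nd_desc);	/* intel_bus_fwa_activate() */
		default:
			return -ENXIO;	/* nothing armed; matches the -ENXIO above */
		}
	}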
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
index 0aca682ab9d7..b768234ccebc 100644
--- a/drivers/acpi/nfit/intel.h
+++ b/drivers/acpi/nfit/intel.h
@@ -111,4 +111,65 @@ struct nd_intel_master_secure_erase {
u8 passphrase[ND_INTEL_PASSPHRASE_SIZE];
u32 status;
} __packed;
+
+#define ND_INTEL_FWA_IDLE 0
+#define ND_INTEL_FWA_ARMED 1
+#define ND_INTEL_FWA_BUSY 2
+
+#define ND_INTEL_DIMM_FWA_NONE 0
+#define ND_INTEL_DIMM_FWA_NOTSTAGED 1
+#define ND_INTEL_DIMM_FWA_SUCCESS 2
+#define ND_INTEL_DIMM_FWA_NEEDRESET 3
+#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4
+#define ND_INTEL_DIMM_FWA_ABORT 5
+#define ND_INTEL_DIMM_FWA_NOTSUPP 6
+#define ND_INTEL_DIMM_FWA_ERROR 7
+
+struct nd_intel_fw_activate_dimminfo {
+ u32 status;
+ u16 result;
+ u8 state;
+ u8 reserved[7];
+} __packed;
+
+#define ND_INTEL_DIMM_FWA_ARM 1
+#define ND_INTEL_DIMM_FWA_DISARM 0
+
+struct nd_intel_fw_activate_arm {
+ u8 activate_arm;
+ u32 status;
+} __packed;
+
+/* Root device command payloads */
+#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0)
+#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1)
+#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2)
+
+struct nd_intel_bus_fw_activate_businfo {
+ u32 status;
+ u16 reserved;
+ u8 state;
+ u8 capability;
+ u64 activate_tmo;
+ u64 cpu_quiesce_tmo;
+ u64 io_quiesce_tmo;
+ u64 max_quiesce_tmo;
+} __packed;
+
+#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16)
+
+#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0)
+#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1)
+struct nd_intel_bus_fw_activate {
+ u8 iodev_state;
+ u32 status;
+} __packed;
+
+extern const struct nvdimm_fw_ops *intel_fw_ops;
+extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops;
#endif
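One note on the extended status codes defined above: they pack a sub-status into the upper 16 bits over the common base status 6, for example

	ND_INTEL_BUS_FWA_STATUS_TMO = (6 | 4 << 16) = 0x00040006	/* base 6, extended 4 */

so a consumer can recover the two halves with (status & 0xffff) and (status >> 16).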
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index a303f0123394..c674f3df9be7 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -18,6 +18,7 @@
/* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
@@ -33,7 +34,6 @@
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_CMD_MAX 31
#define NVDIMM_STANDARD_CMDMASK \
@@ -66,6 +66,13 @@ enum nvdimm_family_cmds {
NVDIMM_INTEL_QUERY_OVERWRITE = 26,
NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27,
NVDIMM_INTEL_MASTER_SECURE_ERASE = 28,
+ NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29,
+ NVDIMM_INTEL_FW_ACTIVATE_ARM = 30,
+};
+
+enum nvdimm_bus_family_cmds {
+ NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1,
+ NVDIMM_BUS_INTEL_FW_ACTIVATE = 2,
};
#define NVDIMM_INTEL_SECURITY_CMDMASK \
@@ -76,13 +83,22 @@ enum nvdimm_family_cmds {
| 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \
| 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE)
+#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM)
+
+#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE)
+
#define NVDIMM_INTEL_CMDMASK \
(NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
| 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
| 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
| 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
| 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \
- | NVDIMM_INTEL_SECURITY_CMDMASK)
+ | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+
+#define NVDIMM_INTEL_DENY_CMDMASK \
+(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
@@ -91,6 +107,11 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
+ /*
+ * to_nfit_bus_uuid() expects to translate bus uuid family ids
+ * to a UUID index using NVDIMM_FAMILY_MAX as an offset
+ */
+ NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -199,6 +220,9 @@ struct nfit_mem {
struct list_head list;
struct acpi_device *adev;
struct acpi_nfit_desc *acpi_desc;
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_result fwa_result;
+ int fwa_count;
char id[NFIT_DIMM_ID_LEN+1];
struct resource *flush_wpq;
unsigned long dsm_mask;
@@ -238,11 +262,17 @@ struct acpi_nfit_desc {
unsigned long scrub_flags;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
- unsigned long bus_nfit_cmd_force_en;
+ unsigned long bus_dsm_mask;
+ unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
unsigned int platform_cap;
unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_capability fwa_cap;
+ int fwa_count;
+ bool fwa_noidle;
+ bool fwa_nosuspend;
};
enum scrub_mode {
@@ -345,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event);
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
+extern struct device_attribute dev_attr_firmware_activate_noidle;
#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6ad8cb05f672..4a0b07792233 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -350,7 +350,7 @@ void __iomem __ref
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = acpi_map(pg_off, pg_sz);
+ virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
@@ -358,7 +358,7 @@ void __iomem __ref
}
INIT_LIST_HEAD(&map->list);
- map->virt = virt;
+ map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
map->phys = pg_off;
map->size = pg_sz;
map->track.refcount = 1;
@@ -1575,11 +1575,26 @@ static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
u32 level)
{
+ acpi_status status;
+
if (!(res->flags & IORESOURCE_MEM))
return AE_TYPE;
- return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
- acpi_deactivate_mem_region, NULL, res, NULL);
+ status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+ acpi_deactivate_mem_region, NULL,
+ res, NULL);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ /*
+ * Wait for all of the mappings queued up for removal by
+ * acpi_deactivate_mem_region() to actually go away.
+ */
+ synchronize_rcu();
+ rcu_barrier();
+ flush_scheduled_work();
+
+ return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);
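With the synchronization added above, a caller may assume that ACPI memory mappings overlapping the resource are actually gone once acpi_release_memory() returns success. A minimal, hypothetical call site follows (device, handle and resource setup assumed; the namespace walk depth of 1 is only an illustrative choice):

	acpi_status status;

	status = acpi_release_memory(ACPI_HANDLE(dev), res, 1);
	if (ACPI_SUCCESS(status)) {
		/* opregion mappings over @res have been unmapped and flushed */
	}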
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6853dbb4131d..49f7acbfcf01 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -470,7 +470,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
switch (priv->version) {
case BRCM_SATA_BCM7425:
hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
- /* fall through */
+ fallthrough;
case BRCM_SATA_NSP:
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 129556fcf6be..86261deeb4c5 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -326,7 +326,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
node);
break;
}
- /* fall through */
+ fallthrough;
case -ENODEV:
/* continue normally */
hpriv->phys[port] = NULL;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b1cd4d97bc2a..1a82058defdb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -190,7 +190,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
case ATA_LITER_PMP_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_HOST_FIRST:
return &ap->link;
}
@@ -201,11 +201,11 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
case ATA_LITER_HOST_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_PMP_FIRST:
if (unlikely(ap->slave_link))
return ap->slave_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_EDGE:
return NULL;
}
@@ -523,7 +523,7 @@ int atapi_cmd_type(u8 opcode)
case ATA_12:
if (atapi_passthru16)
return ATAPI_PASS_THRU;
- /* fall thru */
+ fallthrough;
default:
return ATAPI_MISC;
}
@@ -1800,7 +1800,7 @@ retry:
switch (class) {
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
- /* fall through */
+ fallthrough;
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
@@ -2907,7 +2907,7 @@ int ata_bus_probe(struct ata_port *ap)
case -ENODEV:
/* give it just one more chance */
tries[dev->devno] = min(tries[dev->devno], 1);
- /* fall through */
+ fallthrough;
case -EIO:
if (tries[dev->devno] == 1) {
/* This is the last chance, better to slow
@@ -3158,7 +3158,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
case ATA_DNXFER_FORCE_PIO0:
pio_mask &= 1;
- /* fall through */
+ fallthrough;
case ATA_DNXFER_FORCE_PIO:
mwdma_mask = 0;
udma_mask = 0;
@@ -4694,7 +4694,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
qc->tf.feature != SETFEATURES_RA_ON &&
qc->tf.feature != SETFEATURES_RA_OFF)
break;
- /* fall through */
+ fallthrough;
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
/* revalidate device */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 474c6c34fe02..d912eaa65c94 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1576,7 +1576,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
ata_eh_request_sense(qc, qc->scsicmd);
- /* fall through */
+ fallthrough;
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
@@ -3473,11 +3473,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
case -ENODEV:
/* device missing or wrong IDENTIFY data, schedule probing */
ehc->i.probe_mask |= (1 << dev->devno);
- /* fall through */
+ fallthrough;
case -EINVAL:
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
- /* fall through */
+ fallthrough;
case -EIO:
if (ehc->tries[dev->devno] == 1) {
/* This is the last chance, better to slow
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ec233208585b..4ce4cd32508c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4162,7 +4162,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
break;
}
- /* Fallthrough */
+ fallthrough;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
@@ -4198,7 +4198,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
- /* fall through */
+ fallthrough;
/* no-op's, complete with success */
case REZERO_UNIT:
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index e01a3a6e4d46..2bc5fc81efe3 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -157,7 +157,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
default:
printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
- /* fall through */
+ fallthrough;
case 9 ... 12:
clocks = 7; /* 12 clk */
break;
@@ -190,7 +190,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
default:
printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
- /* fall through */
+ fallthrough;
case 12: /* default 12 clk */
clocks = 0;
break;
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 916bf024d737..7511e11eef4d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -369,7 +369,7 @@ static int serverworks_fixup(struct pci_dev *pdev)
break;
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
ata_pci_bmdma_clear_simplex(pdev);
- /* fall through */
+ fallthrough;
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
rc = serverworks_fixup_csb(pdev);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d7228f8e9297..664ef658a955 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2010,7 +2010,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
break;
case ATA_CMD_WRITE_MULTI_FUA_EXT:
tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
- /* fall through */
+ fallthrough;
case ATA_CMD_WRITE_MULTI_EXT:
tf->command = ATA_CMD_PIO_WRITE_EXT;
break;
@@ -2044,7 +2044,7 @@ static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return AC_ERR_OK;
- /* fall-thru */
+ fallthrough;
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2296,7 +2296,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
- /* fall through */
+ fallthrough;
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_FIRST;
break;
@@ -2347,7 +2347,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
- /* fall thru */
+ fallthrough;
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
@@ -2376,7 +2376,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
- /* fall through */
+ fallthrough;
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
@@ -3864,7 +3864,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
- /* fall through */
+ fallthrough;
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 8729f78cef5f..7815da8ef9e5 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -637,7 +637,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc_fill_sg(qc);
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_NODATA:
i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
qc->dev->devno, pp->pkt);
@@ -652,7 +652,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
break;
case ATAPI_PROT_DMA:
pdc_fill_sg(qc);
- /*FALLTHROUGH*/
+ fallthrough;
case ATAPI_PROT_NODATA:
pdc_atapi_pkt(qc);
break;
@@ -1022,11 +1022,11 @@ static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
case ATAPI_PROT_NODATA:
if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATAPI_PROT_DMA:
case ATA_PROT_DMA:
pdc_packet_start(qc);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 2c7b30c5ea3d..4c01190a5e37 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -669,7 +669,7 @@ static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_DMA:
pdc20621_packet_start(qc);
return 0;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2ca9ec802734..c798856e74a4 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -711,7 +711,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
switch (STATUS_CODE (qe)) {
case 0x01: /* This is for AAL0 where we put the chip in streaming mode */
- /* Fall through */
+ fallthrough;
case 0x02:
/* Process a real txdone entry. */
tmp = qe->p0;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index a81bc49c14ac..9a70bee84125 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -376,33 +376,33 @@ fore200e_shutdown(struct fore200e* fore200e)
case FORE200E_STATE_COMPLETE:
kfree(fore200e->stats);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_IRQ:
free_irq(fore200e->irq, fore200e->atm_dev);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_ALLOC_BUF:
fore200e_free_rx_buf(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_BSQ:
fore200e_uninit_bs_queue(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_RXQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_TXQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_CMDQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INITIALIZE:
/* nothing to do for that state */
@@ -415,7 +415,7 @@ fore200e_shutdown(struct fore200e* fore200e)
case FORE200E_STATE_MAP:
fore200e->bus->unmap(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_CONFIGURE:
/* nothing to do for that state */
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8af793f5e811..17f44abc9418 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1944,14 +1944,14 @@ he_tasklet(unsigned long data)
switch (type) {
case ITYPE_RBRQ_THRESH:
HPRINTK("rbrq%d threshold\n", group);
- /* fall through */
+ fallthrough;
case ITYPE_RBRQ_TIMER:
if (he_service_rbrq(he_dev, group))
he_service_rbpl(he_dev, group);
break;
case ITYPE_TBRQ_THRESH:
HPRINTK("tbrq%d threshold\n", group);
- /* fall through */
+ fallthrough;
case ITYPE_TPD_COMPLETE:
he_service_tbrq(he_dev, group);
break;
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 63871859e6e8..3c081b6171a8 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -192,7 +192,7 @@ static int idt77105_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
switch (cmd) {
case IDT77105_GETSTATZ:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- /* fall through */
+ fallthrough;
case IDT77105_GETSTAT:
return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ);
case ATM_SETLOOP:
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 986c1313694c..ac811cfa6843 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2019,7 +2019,7 @@ static int lanai_normalize_ci(struct lanai_dev *lanai,
switch (*vpip) {
case ATM_VPI_ANY:
*vpip = 0;
- /* FALLTHROUGH */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index ee059c77e3bb..cf5fffcf98a1 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1447,7 +1447,7 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
switch (cmd) {
case ZATM_GETPOOLZ:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- /* fall through */
+ fallthrough;
case ZATM_GETPOOL:
{
struct zatm_pool_info info;
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 99980aa3644b..1c82d824ae00 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1365,7 +1365,7 @@ static void panel_process_inputs(void)
break;
input->rise_timer = 0;
input->state = INPUT_ST_RISING;
- /* fall through */
+ fallthrough;
case INPUT_ST_RISING:
if ((phys_curr & input->mask) != input->value) {
input->state = INPUT_ST_LOW;
@@ -1378,11 +1378,11 @@ static void panel_process_inputs(void)
}
input->high_timer = 0;
input->state = INPUT_ST_HIGH;
- /* fall through */
+ fallthrough;
case INPUT_ST_HIGH:
if (input_state_high(input))
break;
- /* fall through */
+ fallthrough;
case INPUT_ST_FALLING:
input_state_falling(input);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ac1046a382bc..f6f620aa9408 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4264,9 +4264,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
- if (fwnode) {
- struct fwnode_handle *fn = dev->fwnode;
+ struct fwnode_handle *fn = dev->fwnode;
+ if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
@@ -4276,8 +4276,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
}
dev->fwnode = fwnode;
} else {
- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
- dev->fwnode->secondary : NULL;
+ if (fwnode_is_primary(fn)) {
+ dev->fwnode = fn->secondary;
+ fn->secondary = NULL;
+ } else {
+ dev->fwnode = NULL;
+ }
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 5327bfc6ba71..283ca2de76d4 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -289,10 +289,10 @@ static ssize_t firmware_loading_store(struct device *dev,
}
break;
}
- /* fallthrough */
+ fallthrough;
default:
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- /* fallthrough */
+ fallthrough;
case -1:
fw_load_abort(fw_sysfs);
break;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9dd85bea4026..205a06752ca9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1606,13 +1606,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
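The replacement comment spells out a rule for drivers once the old wakeup shortcut is gone: if a system-suspend callback reconfigures the device, it must fence off runtime PM itself so a racing runtime-resume callback cannot be confused by the intermediate state. A sketch of that shape for a hypothetical driver, not code from this patch:

#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)
{
	/* Stop runtime PM callbacks before changing the configuration. */
	pm_runtime_disable(dev);
	/* ... program the device for system sleep here ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* ... restore the runtime configuration here ... */
	pm_runtime_enable(dev);
	return 0;
}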
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3cf9bc5d8d95..6dba41395155 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1135,7 +1135,7 @@ noskb: if (buf)
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
- /* fall through */
+ fallthrough;
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 1553d41f0b91..a50e13af0305 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,7 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
- /* Fall through */
+ fallthrough;
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index fe6cb99eb917..740e93bad21f 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1733,7 +1733,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
+ fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
/* Remember whether we saw a READ or WRITE error.
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index cb687ccdbd96..04b6bde9419d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -430,7 +430,7 @@ int drbd_thread_start(struct drbd_thread *thi)
thi->t_state = RESTARTING;
drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
- /* fall through */
+ fallthrough;
case RUNNING:
case RESTARTING:
default:
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 28eb078f8b75..43c8ae4d9fca 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -3883,7 +3883,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (nla_put_u32(skb, T_helper_exit_code,
sib->helper_exit_code))
goto nla_put_failure;
- /* fall through */
+ fallthrough;
case SIB_HELPER_PRE:
if (nla_put_string(skb, T_helper, sib->helper_name))
goto nla_put_failure;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1d17593f5d2b..422363daa618 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1797,7 +1797,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
break;
else
drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
- /* Fall through */
+ fallthrough;
case WO_BDEV_FLUSH:
case WO_DRAIN_IO:
@@ -2917,7 +2917,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
- /* fall through */
+ fallthrough;
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -3083,7 +3083,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
rv = 1;
break;
}
- /* Else fall through - to one of the other strategies... */
+ fallthrough; /* to one of the other strategies */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
@@ -3096,7 +3096,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
- /* fall through */
+ fallthrough;
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
@@ -3108,7 +3108,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- /* else, fall through */
+ fallthrough;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
@@ -3608,7 +3608,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(device, "pri-lost");
- /* fall through */
+ fallthrough;
case ASB_DISCONNECT:
drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 674be09b2da9..5c975af9c15f 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -611,7 +611,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_set_out_of_sync(device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
- /* fall through. */
+ fallthrough;
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
@@ -836,7 +836,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
} /* else: FIXME can this happen? */
break;
}
- /* else, fall through - to BARRIER_ACKED */
+ fallthrough; /* to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 09079aee8dc4..a563b023458a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1680,7 +1680,7 @@ static void recal_interrupt(void)
clear_bit(FD_DISK_NEWCHANGE_BIT,
&drive_state[current_drive].flags);
drive_state[current_drive].select_date = jiffies;
- /* fall through */
+ fallthrough;
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
@@ -3592,7 +3592,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
- /* fall through */
+ fallthrough;
case FDGETDRVSTAT:
outparam = &drive_state[drive];
break;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d18160146226..d3394191e168 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -878,6 +878,7 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
+ u32 granularity, max_discard_sectors;
/*
* If the backing device is a block device, mirror its zeroing
@@ -890,11 +891,10 @@ static void loop_config_discard(struct loop_device *lo)
struct request_queue *backingq;
backingq = bdev_get_queue(inode->i_bdev);
- blk_queue_max_discard_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
- blk_queue_max_write_zeroes_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
+ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+ granularity = backingq->limits.discard_granularity ?:
+ queue_physical_block_size(backingq);
/*
* We use punch hole to reclaim the free space used by the
@@ -903,23 +903,26 @@ static void loop_config_discard(struct loop_device *lo)
* useful information.
*/
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
- q->limits.discard_granularity = 0;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_max_write_zeroes_sectors(q, 0);
+ max_discard_sectors = 0;
+ granularity = 0;
} else {
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = 0;
-
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ max_discard_sectors = UINT_MAX >> 9;
+ granularity = inode->i_sb->s_blocksize;
}
- if (q->limits.max_write_zeroes_sectors)
+ if (max_discard_sectors) {
+ q->limits.discard_granularity = granularity;
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- else
+ } else {
+ q->limits.discard_granularity = 0;
+ blk_queue_max_discard_sectors(q, 0);
+ blk_queue_max_write_zeroes_sectors(q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+ }
+ q->limits.discard_alignment = 0;
}
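For the file-backed case described in the comments above, a discard on the loop queue ends up as a punch-hole fallocate() on the backing file, which is why the granularity tracks the filesystem block size. A small sketch of that mapping; the helper name is hypothetical and error handling is reduced to the essentials:

#include <linux/falloc.h>
#include <linux/fs.h>

static int foo_discard_as_punch_hole(struct file *file, loff_t pos, loff_t len)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	/* Release the backing blocks without changing the file size. */
	return file->f_op->fallocate(file, mode, pos, len);
}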
static void loop_unprepare_queue(struct loop_device *lo)
@@ -1111,8 +1114,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
mapping = file->f_mapping;
inode = mapping->host;
- size = get_loop_size(lo, file);
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1162,6 +1163,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
+
+ size = get_loop_size(lo, file);
loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
@@ -1171,6 +1174,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+ if (partscan)
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
/* Grab the block_device to prevent its destruction after we
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
@@ -1717,7 +1722,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_BLOCK_SIZE:
if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Fall through */
+ fallthrough;
default:
err = lo_simple_ioctl(lo, cmd, arg);
break;
@@ -1865,7 +1870,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_STATUS64:
case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
- /* fall through */
+ fallthrough;
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3ff4054d6834..edf8b632e3d2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1363,6 +1363,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
nbd->tag_set.timeout = timeout * HZ;
if (timeout)
blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
+ else
+ blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 47a9dad880af..d74443a9c8fa 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1147,7 +1147,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
- req_op(rq) & REQ_FUA);
+ rq->cmd_flags & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
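The one-line change above is a flags-versus-opcode fix: req_op() masks the request down to its operation number (REQ_OP_MASK), so testing that value against the REQ_FUA flag bit was always false and FUA was silently ignored. Illustrative comparison only, not driver code:

#include <linux/blkdev.h>

static bool rq_wants_fua(struct request *rq)
{
	return rq->cmd_flags & REQ_FUA;		/* correct: REQ_FUA is a flag bit */
}

static bool rq_wants_fua_broken(struct request *rq)
{
	return req_op(rq) & REQ_FUA;		/* always false: only the op number is left */
}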
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c0967507d085..a7af4f27b7c3 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -440,7 +440,7 @@ static void run_fsm(void)
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
- /* fall through */
+ fallthrough;
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
@@ -465,7 +465,7 @@ static void run_fsm(void)
if (stop)
return;
}
- /* fall through */
+ fallthrough;
case Hold:
schedule_fsm();
return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4becc1efe775..1034e445680c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2641,7 +2641,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- /* fall through */
+ fallthrough;
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d9c0e7d154f9..011539039693 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3293,7 +3293,7 @@ again:
case __RBD_OBJ_COPYUP_OBJECT_MAPS:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_OBJECT_MAPS:
if (*result) {
rbd_warn(rbd_dev, "snap object map update failed: %d",
@@ -3312,7 +3312,7 @@ again:
case __RBD_OBJ_COPYUP_WRITE_OBJECT:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_WRITE_OBJECT:
return true;
default:
@@ -3399,7 +3399,7 @@ again:
case __RBD_OBJ_WRITE_COPYUP:
if (!rbd_obj_advance_copyup(obj_req, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_WRITE_COPYUP:
if (*result) {
rbd_warn(rbd_dev, "copyup failed: %d", *result);
@@ -3592,7 +3592,7 @@ again:
case __RBD_IMG_OBJECT_REQUESTS:
if (!pending_result_dec(&img_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_IMG_OBJECT_REQUESTS:
return true;
default:
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index 5eddfd29ab64..b241a099aeae 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -45,7 +45,7 @@ void rnbd_dev_close(struct rnbd_dev *dev)
kfree(dev);
}
-static void rnbd_dev_bi_end_io(struct bio *bio)
+void rnbd_dev_bi_end_io(struct bio *bio)
{
struct rnbd_dev_blk_io *io = bio->bi_private;
@@ -63,8 +63,8 @@ static void rnbd_dev_bi_end_io(struct bio *bio)
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask)
+struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+ unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -99,36 +99,5 @@ static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
offset = 0;
}
- bio->bi_end_io = bio_put;
return bio;
}
-
-int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
- size_t len, u32 bi_size, enum rnbd_io_flags flags,
- short prio, void *priv)
-{
- struct rnbd_dev_blk_io *io;
- struct bio *bio;
-
- /* Generate bio with pages pointing to the rdma buffer */
- bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- io = container_of(bio, struct rnbd_dev_blk_io, bio);
-
- io->dev = dev;
- io->priv = priv;
-
- bio->bi_end_io = rnbd_dev_bi_end_io;
- bio->bi_private = io;
- bio->bi_opf = rnbd_to_bio_flags(flags);
- bio->bi_iter.bi_sector = sector;
- bio->bi_iter.bi_size = bi_size;
- bio_set_prio(bio, prio);
- bio_set_dev(bio, dev->bdev);
-
- submit_bio(bio);
-
- return 0;
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 0f65b09a270e..0eb23850afb9 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -41,6 +41,11 @@ void rnbd_dev_close(struct rnbd_dev *dev);
void rnbd_endio(void *priv, int error);
+void rnbd_dev_bi_end_io(struct bio *bio);
+
+struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+ unsigned int len, gfp_t gfp_mask);
+
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
return queue_max_segments(bdev_get_queue(dev->bdev));
@@ -75,18 +80,4 @@ static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
return bdev_get_queue(dev->bdev)->limits.discard_alignment;
}
-/**
- * rnbd_dev_submit_io() - Submit an I/O to the disk
- * @dev: device to that the I/O is submitted
- * @sector: address to read/write data to
- * @data: I/O data to write or buffer to read I/O date into
- * @len: length of @data
- * @bi_size: Amount of data that will be read/written
- * @prio: IO priority
- * @priv: private data passed to @io_fn
- */
-int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
- size_t len, u32 bi_size, enum rnbd_io_flags flags,
- short prio, void *priv);
-
#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 86e61523907b..e1bc8b4cd592 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -124,6 +124,9 @@ static int process_rdma(struct rtrs_srv *sess,
struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id;
int err;
+ struct rnbd_dev_blk_io *io;
+ struct bio *bio;
+ short prio;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -142,18 +145,30 @@ static int process_rdma(struct rtrs_srv *sess,
priv->sess_dev = sess_dev;
priv->id = id;
- err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector),
- data, datalen, le32_to_cpu(msg->bi_size),
- le32_to_cpu(msg->rw),
- srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
- usrlen < sizeof(*msg) ?
- 0 : le16_to_cpu(msg->prio), priv);
- if (unlikely(err)) {
- rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
- err);
+ /* Generate bio with pages pointing to the rdma buffer */
+ bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
+ rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
goto sess_dev_put;
}
+ io = container_of(bio, struct rnbd_dev_blk_io, bio);
+ io->dev = sess_dev->rnbd_dev;
+ io->priv = priv;
+
+ bio->bi_end_io = rnbd_dev_bi_end_io;
+ bio->bi_private = io;
+ bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+ bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
+ bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
+ prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
+ usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
+ bio_set_prio(bio, prio);
+ bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
+
+ submit_bio(bio);
+
return 0;
sess_dev_put:
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 7e261224ff10..8799e3bab067 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -425,7 +425,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
* Fall through so the DMA devices can be attached and
* the user can attempt to pull off their data.
*/
- /* fall through */
+ fallthrough;
case CARD_STATE_GOOD:
st = rsxx_get_card_size8(card, &card->size8);
if (st)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3a476dc1d14f..ae6454c24594 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1436,7 +1436,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
blk_mq_requeue_request(req, true);
break;
}
- /* fall through */
+ fallthrough;
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 63b213e00b37..b2e48dac1ebd 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -126,16 +126,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
if (!range)
return -ENOMEM;
- __rq_for_each_bio(bio, req) {
- u64 sector = bio->bi_iter.bi_sector;
- u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-
- range[n].flags = cpu_to_le32(flags);
- range[n].num_sectors = cpu_to_le32(num_sectors);
- range[n].sector = cpu_to_le64(sector);
- n++;
+ /*
+ * Single max discard segment means multi-range discard isn't
+ * supported, and the block layer only runs contiguity merging as
+ * for normal RW requests. So we can't rely on the bios for retrieving
+ * each range info.
+ */
+ if (queue_max_discard_segments(req->q) == 1) {
+ range[0].flags = cpu_to_le32(flags);
+ range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
+ range[0].sector = cpu_to_le64(blk_rq_pos(req));
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 sector = bio->bi_iter.bi_sector;
+ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+ range[n].flags = cpu_to_le32(flags);
+ range[n].num_sectors = cpu_to_le32(num_sectors);
+ range[n].sector = cpu_to_le64(sector);
+ n++;
+ }
}
+ WARN_ON_ONCE(n != segments);
+
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
req->special_vec.bv_len = sizeof(*range) * segments;
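The new comment covers the single-segment case: with only one discard segment advertised, the block layer merges discards the same way it merges normal reads and writes, so the per-bio ranges no longer describe the merged request and the request-level helpers must be used. For reference, an illustrative use of those helpers (not part of the patch):

#include <linux/blkdev.h>
#include <linux/printk.h>

static void foo_log_request_span(struct request *rq)
{
	sector_t start = blk_rq_pos(rq);	/* first sector of the whole request */
	unsigned int nr = blk_rq_sectors(rq);	/* total 512-byte sectors in it */

	pr_debug("request spans sectors %llu..%llu\n",
		 (unsigned long long)start,
		 (unsigned long long)(start + nr - 1));
}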
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c2f71265af4b..adfc9352351d 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1260,7 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
- /* fall through */
+ fallthrough;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 42944d41aea0..b9aa5d1ac10b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -843,7 +843,7 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through */
+ fallthrough;
/* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3bb3dd8da9b0..91de2e0755ae 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1403,7 +1403,6 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
case BLKIF_RSP_EOPNOTSUPP:
return REQ_EOPNOTSUPP;
case BLKIF_RSP_ERROR:
- /* Fallthrough. */
default:
return REQ_ERROR;
}
@@ -1643,7 +1642,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->feature_flush = 0;
xlvbd_flush(info);
}
- /* fall through */
+ fallthrough;
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
@@ -2484,7 +2483,7 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
if (info)
blkfront_closing(info);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index fb5a901fd89e..efb088df1276 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1849,7 +1849,7 @@ static int sysc_clockdomain_init(struct sysc *ddata)
switch (ddata->nr_clocks) {
case 2:
ick = ddata->clocks[SYSC_ICK];
- /* fallthrough */
+ fallthrough;
case 1:
fck = ddata->clocks[SYSC_FCK];
break;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 89527bae4602..760d9a931289 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -357,7 +357,7 @@ found:
default:
break;
}
- /*FALLTHROUGH*/
+ fallthrough;
default:
bridge->driver = &ali_generic_bridge;
}
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index d704cef64b64..055cfe59f519 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -92,8 +92,7 @@ static int ingenic_rng_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
pr_err("%s: Failed to map RNG registers\n", __func__);
- ret = PTR_ERR(priv->base);
- goto err_free_rng;
+ return PTR_ERR(priv->base);
}
priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
@@ -106,17 +105,13 @@ static int ingenic_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&priv->rng);
if (ret) {
dev_err(&pdev->dev, "Failed to register hwrng\n");
- goto err_free_rng;
+ return ret;
}
platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
return 0;
-
-err_free_rng:
- kfree(priv);
- return ret;
}
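Dropping the err_free_rng label is correct because priv is managed memory: it is allocated with devm_kzalloc() earlier in this driver's probe (not shown in this hunk), so the driver core frees it when probe fails or the device is unbound, and the removed kfree() would have been a double free. A minimal sketch of that lifetime rule for a hypothetical driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);	/* no kfree(priv): devm frees it */

	platform_set_drvdata(pdev, priv);
	return 0;
}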
static int ingenic_rng_remove(struct platform_device *pdev)
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index ed4dc3b1843e..f292e74bd4a5 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -99,7 +99,7 @@ static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
switch (kcs_bmc->phase) {
case KCS_PHASE_WRITE_START:
kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
- /* fall through */
+ fallthrough;
case KCS_PHASE_WRITE_DATA:
if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index bd95aba1f9fe..45932f05fd67 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -734,7 +734,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fall through - for 64-bit */
+ fallthrough; /* for 64-bit */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
@@ -762,7 +762,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fall through - for x32 mode */
+ fallthrough; /* for x32 mode */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 687d4af6945d..abd4ffdc8cde 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -791,7 +791,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
- /* fall through */
+ fallthrough;
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
if ((unsigned long long)offset >= -MAX_ERRNO) {
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 8206412d25ba..e9f694b36871 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -286,7 +286,7 @@ static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
#ifdef CONFIG_PPC
case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- /* fall through */
+ fallthrough;
case IOC_NVRAM_GET_OFFSET:
ret = -EINVAL;
#ifdef CONFIG_PPC_PMAC
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 690a2587e0c5..4026fac9fac3 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -50,7 +50,7 @@ source "drivers/clk/versatile/Kconfig"
config CLK_HSDK
bool "PLL Driver for HSDK platform"
depends on OF || COMPILE_TEST
- depends on IOMEM
+ depends on HAS_IOMEM
help
This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
control.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ca9af11d3391..da8fcf147eb1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
index e2007ac4d235..61bb224f6330 100644
--- a/drivers/clk/actions/owl-s500.c
+++ b/drivers/clk/actions/owl-s500.c
@@ -23,8 +23,10 @@
#include "owl-gate.h"
#include "owl-mux.h"
#include "owl-pll.h"
+#include "owl-reset.h"
#include <dt-bindings/clock/actions,s500-cmu.h>
+#include <dt-bindings/reset/actions,s500-reset.h>
#define CMU_COREPLL (0x0000)
#define CMU_DEVPLL (0x0004)
@@ -175,6 +177,8 @@ static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RAT
static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
/* gate clocks */
+static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
+static OWL_GATE(dmac_clk, "dmac_clk", "h_clk", CMU_DEVCLKEN0, 1, 0, 0);
static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
@@ -183,7 +187,8 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
/* divider clocks */
-static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
/* factor clocks */
@@ -428,6 +433,9 @@ static struct owl_clk_common *s500_clks[] = {
&spdif_clk.common,
&nand_clk.common,
&ecc_clk.common,
+ &apb_clk.common,
+ &dmac_clk.common,
+ &gpio_clk.common,
};
static struct clk_hw_onecell_data s500_hw_clks = {
@@ -484,24 +492,103 @@ static struct clk_hw_onecell_data s500_hw_clks = {
[CLK_SPDIF] = &spdif_clk.common.hw,
[CLK_NAND] = &nand_clk.common.hw,
[CLK_ECC] = &ecc_clk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_DMAC] = &dmac_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
},
.num = CLK_NR_CLKS,
};
+static const struct owl_reset_map s500_resets[] = {
+ [RESET_DMAC] = { CMU_DEVRST0, BIT(0) },
+ [RESET_NORIF] = { CMU_DEVRST0, BIT(1) },
+ [RESET_DDR] = { CMU_DEVRST0, BIT(2) },
+ [RESET_NANDC] = { CMU_DEVRST0, BIT(3) },
+ [RESET_SD0] = { CMU_DEVRST0, BIT(4) },
+ [RESET_SD1] = { CMU_DEVRST0, BIT(5) },
+ [RESET_PCM1] = { CMU_DEVRST0, BIT(6) },
+ [RESET_DE] = { CMU_DEVRST0, BIT(7) },
+ [RESET_LCD] = { CMU_DEVRST0, BIT(8) },
+ [RESET_SD2] = { CMU_DEVRST0, BIT(9) },
+ [RESET_DSI] = { CMU_DEVRST0, BIT(10) },
+ [RESET_CSI] = { CMU_DEVRST0, BIT(11) },
+ [RESET_BISP] = { CMU_DEVRST0, BIT(12) },
+ [RESET_KEY] = { CMU_DEVRST0, BIT(14) },
+ [RESET_GPIO] = { CMU_DEVRST0, BIT(15) },
+ [RESET_AUDIO] = { CMU_DEVRST0, BIT(17) },
+ [RESET_PCM0] = { CMU_DEVRST0, BIT(18) },
+ [RESET_VDE] = { CMU_DEVRST0, BIT(19) },
+ [RESET_VCE] = { CMU_DEVRST0, BIT(20) },
+ [RESET_GPU3D] = { CMU_DEVRST0, BIT(22) },
+ [RESET_NIC301] = { CMU_DEVRST0, BIT(23) },
+ [RESET_LENS] = { CMU_DEVRST0, BIT(26) },
+ [RESET_PERIPHRESET] = { CMU_DEVRST0, BIT(27) },
+ [RESET_USB2_0] = { CMU_DEVRST1, BIT(0) },
+ [RESET_TVOUT] = { CMU_DEVRST1, BIT(1) },
+ [RESET_HDMI] = { CMU_DEVRST1, BIT(2) },
+ [RESET_HDCP2TX] = { CMU_DEVRST1, BIT(3) },
+ [RESET_UART6] = { CMU_DEVRST1, BIT(4) },
+ [RESET_UART0] = { CMU_DEVRST1, BIT(5) },
+ [RESET_UART1] = { CMU_DEVRST1, BIT(6) },
+ [RESET_UART2] = { CMU_DEVRST1, BIT(7) },
+ [RESET_SPI0] = { CMU_DEVRST1, BIT(8) },
+ [RESET_SPI1] = { CMU_DEVRST1, BIT(9) },
+ [RESET_SPI2] = { CMU_DEVRST1, BIT(10) },
+ [RESET_SPI3] = { CMU_DEVRST1, BIT(11) },
+ [RESET_I2C0] = { CMU_DEVRST1, BIT(12) },
+ [RESET_I2C1] = { CMU_DEVRST1, BIT(13) },
+ [RESET_USB3] = { CMU_DEVRST1, BIT(14) },
+ [RESET_UART3] = { CMU_DEVRST1, BIT(15) },
+ [RESET_UART4] = { CMU_DEVRST1, BIT(16) },
+ [RESET_UART5] = { CMU_DEVRST1, BIT(17) },
+ [RESET_I2C2] = { CMU_DEVRST1, BIT(18) },
+ [RESET_I2C3] = { CMU_DEVRST1, BIT(19) },
+ [RESET_ETHERNET] = { CMU_DEVRST1, BIT(20) },
+ [RESET_CHIPID] = { CMU_DEVRST1, BIT(21) },
+ [RESET_USB2_1] = { CMU_DEVRST1, BIT(22) },
+ [RESET_WD0RESET] = { CMU_DEVRST1, BIT(24) },
+ [RESET_WD1RESET] = { CMU_DEVRST1, BIT(25) },
+ [RESET_WD2RESET] = { CMU_DEVRST1, BIT(26) },
+ [RESET_WD3RESET] = { CMU_DEVRST1, BIT(27) },
+ [RESET_DBG0RESET] = { CMU_DEVRST1, BIT(28) },
+ [RESET_DBG1RESET] = { CMU_DEVRST1, BIT(29) },
+ [RESET_DBG2RESET] = { CMU_DEVRST1, BIT(30) },
+ [RESET_DBG3RESET] = { CMU_DEVRST1, BIT(31) },
+};
+
static struct owl_clk_desc s500_clk_desc = {
.clks = s500_clks,
.num_clks = ARRAY_SIZE(s500_clks),
.hw_clks = &s500_hw_clks,
+
+ .resets = s500_resets,
+ .num_resets = ARRAY_SIZE(s500_resets),
};
static int s500_clk_probe(struct platform_device *pdev)
{
struct owl_clk_desc *desc;
+ struct owl_reset *reset;
+ int ret;
desc = &s500_clk_desc;
owl_clk_regmap_init(pdev, desc);
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.ops = &owl_reset_ops;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->reset_map = desc->resets;
+ reset->regmap = desc->regmap;
+
+ ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register reset controller\n");
+
return owl_clk_probe(&pdev->dev, desc->hw_clks);
}
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 8b90357f2a93..79301e1c1c36 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
+obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 38bdb4981315..2c3d8e6ca63c 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -160,7 +160,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 4, i,
- &at91rm9200_programmable_layout);
+ &at91rm9200_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index 6d0723aa8b13..bb81ff731ad8 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -436,7 +436,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
hw = at91_clk_register_programmable(regmap, name,
parent_names, 4, i,
- &at91rm9200_programmable_layout);
+ &at91rm9200_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 9873b583c260..c88ee20bee31 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -111,7 +111,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
@@ -181,7 +181,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91sam9g45_programmable_layout);
+ &at91sam9g45_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 630dc5d87171..93f7eb216122 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -124,7 +124,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
@@ -199,7 +199,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91sam9x5_programmable_layout);
+ &at91sam9x5_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -222,7 +223,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
at91sam9n12_periphck[i].n,
"masterck",
at91sam9n12_periphck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 0d1cc44b056f..a343eb69bb35 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -137,7 +137,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91rm9200_programmable_layout);
+ &at91rm9200_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 0ce3da080287..22b9aad9efb8 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -226,7 +226,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91sam9x5_programmable_layout);
+ &at91sam9x5_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -257,7 +258,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
at91sam9x5_periphck[i].n,
"masterck",
at91sam9x5_periphck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
@@ -270,7 +271,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
extra_pcks[i].n,
"masterck",
extra_pcks[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44a46dcc0518..b4fc8d71daf2 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -18,18 +18,17 @@
#define GENERATED_MAX_DIV 255
-#define GCK_INDEX_DT_AUDIO_PLL 5
-
struct clk_generated {
struct clk_hw hw;
struct regmap *regmap;
struct clk_range range;
spinlock_t *lock;
+ u32 *mux_table;
u32 id;
u32 gckdiv;
const struct clk_pcr_layout *layout;
u8 parent_id;
- bool audio_pll_allowed;
+ int chg_pid;
};
#define to_clk_generated(hw) \
@@ -83,7 +82,7 @@ static int clk_generated_is_enabled(struct clk_hw *hw)
regmap_read(gck->regmap, gck->layout->offset, &status);
spin_unlock_irqrestore(gck->lock, flags);
- return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
+ return !!(status & AT91_PMC_PCR_GCKEN);
}
static unsigned long
@@ -109,7 +108,7 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate / div;
tmp_diff = abs(req->rate - tmp_rate);
- if (*best_diff < 0 || *best_diff > tmp_diff) {
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
*best_rate = tmp_rate;
*best_diff = tmp_diff;
req->best_parent_rate = parent_rate;
@@ -129,7 +128,10 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
int i;
u32 div;
- for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ if (gck->chg_pid == i)
+ continue;
+
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
@@ -161,16 +163,17 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
* that the only clks able to modify gck rate are those of audio IPs.
*/
- if (!gck->audio_pll_allowed)
+ if (gck->chg_pid < 0)
goto end;
- parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
+ parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
if (!parent)
goto end;
for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
req_parent.rate = req->rate * div;
- __clk_determine_rate(parent, &req_parent);
+ if (__clk_determine_rate(parent, &req_parent))
+ continue;
clk_generated_best_diff(req, parent, req_parent.rate, div,
&best_diff, &best_rate);
@@ -184,8 +187,8 @@ end:
__clk_get_name((req->best_parent_hw)->clk),
req->best_parent_rate);
- if (best_rate < 0)
- return best_rate;
+ if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
+ return -EINVAL;
req->rate = best_rate;
return 0;
@@ -199,7 +202,11 @@ static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- gck->parent_id = index;
+ if (gck->mux_table)
+ gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
+ else
+ gck->parent_id = index;
+
return 0;
}
@@ -271,8 +278,9 @@ struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
- u8 num_parents, u8 id, bool pll_audio,
- const struct clk_range *range)
+ u32 *mux_table, u8 num_parents, u8 id,
+ const struct clk_range *range,
+ int chg_pid)
{
struct clk_generated *gck;
struct clk_init_data init;
@@ -287,16 +295,18 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
init.ops = &generated_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ if (chg_pid >= 0)
+ init.flags |= CLK_SET_RATE_PARENT;
gck->id = id;
gck->hw.init = &init;
gck->regmap = regmap;
gck->lock = lock;
gck->range = *range;
- gck->audio_pll_allowed = pll_audio;
+ gck->chg_pid = chg_pid;
gck->layout = layout;
+ gck->mux_table = mux_table;
clk_generated_startup(gck);
hw = &gck->hw;
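clk-generated now takes an optional mux table, so a parent's position in parent_names no longer has to equal the value written to the PCR source-select field; clk_generated_set_parent() translates between the two with the generic helpers. A small sketch of what such a table encodes, with hypothetical register values:

#include <linux/clk-provider.h>

/* Entry i holds the register value that selects parent i. */
static u32 foo_gck_mux_table[] = { 0, 1, 2, 3, 8, 9 };

static u32 foo_index_to_field(u8 index)
{
	return clk_mux_index_to_val(foo_gck_mux_table, 0, index);
}

static int foo_field_to_index(struct clk_hw *hw, u32 val)
{
	return clk_mux_val_to_index(hw, foo_gck_mux_table, 0, val);
}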
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 37c22667e831..5c83e899084f 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -175,7 +175,7 @@ static bool clk_main_rc_osc_ready(struct regmap *regmap)
regmap_read(regmap, AT91_PMC_SR, &status);
- return status & AT91_PMC_MOSCRCS;
+ return !!(status & AT91_PMC_MOSCRCS);
}
static int clk_main_rc_osc_prepare(struct clk_hw *hw)
@@ -336,7 +336,7 @@ static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status);
- return status & AT91_PMC_MAINRDY ? 1 : 0;
+ return !!(status & AT91_PMC_MAINRDY);
}
static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
@@ -398,7 +398,7 @@ static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
regmap_read(regmap, AT91_PMC_SR, &status);
- return status & AT91_PMC_MOSCSELS ? 1 : 0;
+ return !!(status & AT91_PMC_MOSCSELS);
}
static int clk_sam9x5_main_prepare(struct clk_hw *hw)
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index e7e0ba652de1..bd0d8a69a2cf 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -17,30 +17,49 @@
#define MASTER_DIV_SHIFT 8
#define MASTER_DIV_MASK 0x3
+#define PMC_MCR 0x30
+#define PMC_MCR_ID_MSK GENMASK(3, 0)
+#define PMC_MCR_CMD BIT(7)
+#define PMC_MCR_DIV GENMASK(10, 8)
+#define PMC_MCR_CSS GENMASK(20, 16)
+#define PMC_MCR_CSS_SHIFT (16)
+#define PMC_MCR_EN BIT(28)
+
+#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK)
+
+#define MASTER_MAX_ID 4
+
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
struct clk_master {
struct clk_hw hw;
struct regmap *regmap;
+ spinlock_t *lock;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
+ u32 *mux_table;
u32 mckr;
+ int chg_pid;
+ u8 id;
+ u8 parent;
+ u8 div;
};
-static inline bool clk_master_ready(struct regmap *regmap)
+static inline bool clk_master_ready(struct clk_master *master)
{
+ unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY;
unsigned int status;
- regmap_read(regmap, AT91_PMC_SR, &status);
+ regmap_read(master->regmap, AT91_PMC_SR, &status);
- return status & AT91_PMC_MCKRDY ? 1 : 0;
+ return !!(status & bit);
}
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- while (!clk_master_ready(master->regmap))
+ while (!clk_master_ready(master))
cpu_relax();
return 0;
@@ -50,7 +69,7 @@ static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- return clk_master_ready(master->regmap);
+ return clk_master_ready(master);
}
static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
@@ -143,6 +162,287 @@ at91_clk_register_master(struct regmap *regmap,
return hw;
}
+static unsigned long
+clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
+}
+
+static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate,
+ long *best_rate,
+ long *best_diff,
+ u32 div)
+{
+ unsigned long tmp_rate, tmp_diff;
+
+ if (div == MASTER_PRES_MAX)
+ tmp_rate = parent_rate / 3;
+ else
+ tmp_rate = parent_rate >> div;
+
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
+static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct clk_rate_request req_parent = *req;
+ struct clk_hw *parent;
+ long best_rate = LONG_MIN, best_diff = LONG_MIN;
+ unsigned long parent_rate;
+ unsigned int div, i;
+
+ /* First: check the dividers of MCR. */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ continue;
+
+ for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+ clk_sama7g5_master_best_diff(req, parent, parent_rate,
+ &best_rate, &best_diff,
+ div);
+ if (!best_diff)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ /* Second: try to request rate from the changeable parent. */
+ if (master->chg_pid < 0)
+ goto end;
+
+ parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
+ if (!parent)
+ goto end;
+
+ for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+ if (div == MASTER_PRES_MAX)
+ req_parent.rate = req->rate * 3;
+ else
+ req_parent.rate = req->rate << div;
+
+ if (__clk_determine_rate(parent, &req_parent))
+ continue;
+
+ clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
+ &best_rate, &best_diff, div);
+
+ if (!best_diff)
+ break;
+ }
+
+end:
+ pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return -EINVAL;
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ u8 index;
+
+ spin_lock_irqsave(master->lock, flags);
+ index = clk_mux_val_to_index(&master->hw, master->mux_table, 0,
+ master->parent);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return index;
+}
+
+static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ spin_lock_irqsave(master->lock, flags);
+ master->parent = clk_mux_index_to_val(master->mux_table, 0, index);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static int clk_sama7g5_master_enable(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int val, cparent;
+
+ spin_lock_irqsave(master->lock, flags);
+
+ regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id));
+ regmap_read(master->regmap, PMC_MCR, &val);
+ regmap_update_bits(master->regmap, PMC_MCR,
+ PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV |
+ PMC_MCR_CMD | PMC_MCR_ID_MSK,
+ PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) |
+ (master->div << MASTER_DIV_SHIFT) |
+ PMC_MCR_CMD | PMC_MCR_ID(master->id));
+
+ cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+
+ /* Wait here only if parent is being changed. */
+ while ((cparent != master->parent) && !clk_master_ready(master))
+ cpu_relax();
+
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static void clk_sama7g5_master_disable(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(master->lock, flags);
+
+ regmap_write(master->regmap, PMC_MCR, master->id);
+ regmap_update_bits(master->regmap, PMC_MCR,
+ PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK,
+ PMC_MCR_CMD | PMC_MCR_ID(master->id));
+
+ spin_unlock_irqrestore(master->lock, flags);
+}
+
+static int clk_sama7g5_master_is_enabled(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(master->lock, flags);
+
+ regmap_write(master->regmap, PMC_MCR, master->id);
+ regmap_read(master->regmap, PMC_MCR, &val);
+
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return !!(val & PMC_MCR_EN);
+}
+
+static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long div, flags;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1)))
+ return -EINVAL;
+
+ if (div == 3)
+ div = MASTER_PRES_MAX;
+ else
+ div = ffs(div) - 1;
+
+ spin_lock_irqsave(master->lock, flags);
+ master->div = div;
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops sama7g5_master_ops = {
+ .enable = clk_sama7g5_master_enable,
+ .disable = clk_sama7g5_master_disable,
+ .is_enabled = clk_sama7g5_master_is_enabled,
+ .recalc_rate = clk_sama7g5_master_recalc_rate,
+ .determine_rate = clk_sama7g5_master_determine_rate,
+ .set_rate = clk_sama7g5_master_set_rate,
+ .get_parent = clk_sama7g5_master_get_parent,
+ .set_parent = clk_sama7g5_master_set_parent,
+};
+
+struct clk_hw * __init
+at91_clk_sama7g5_register_master(struct regmap *regmap,
+ const char *name, int num_parents,
+ const char **parent_names,
+ u32 *mux_table,
+ spinlock_t *lock, u8 id,
+ bool critical, int chg_pid)
+{
+ struct clk_master *master;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ unsigned long flags;
+ unsigned int val;
+ int ret;
+
+ if (!name || !num_parents || !parent_names || !mux_table ||
+ !lock || id > MASTER_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sama7g5_master_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ if (chg_pid >= 0)
+ init.flags |= CLK_SET_RATE_PARENT;
+ if (critical)
+ init.flags |= CLK_IS_CRITICAL;
+
+ master->hw.init = &init;
+ master->regmap = regmap;
+ master->id = id;
+ master->chg_pid = chg_pid;
+ master->lock = lock;
+ master->mux_table = mux_table;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_write(master->regmap, PMC_MCR, master->id);
+ regmap_read(master->regmap, PMC_MCR, &val);
+ master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+ master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT;
+ spin_unlock_irqrestore(master->lock, flags);
+
+ hw = &master->hw;
+ ret = clk_hw_register(NULL, &master->hw);
+ if (ret) {
+ kfree(master);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
const struct clk_master_layout at91rm9200_master_layout = {
.mask = 0x31F,
.pres_shift = 2,
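Usage sketch (not part of this patch): the new at91_clk_sama7g5_register_master() helper is meant to be called once per MCK1..MCK4 domain. The parent names, CSS encodings, lock and ID below are illustrative placeholders only.

	/* Sketch: register one SAMA7G5 master clock with no changeable parent. */
	static const char *mckx_parents[] = { "md_slck", "mainck", "syspll_divpmcck" };
	static u32 mckx_mux_table[] = { 0, 1, 5 };	/* assumed CSS values, one per parent */
	static DEFINE_SPINLOCK(mckx_lock);

	hw = at91_clk_sama7g5_register_master(regmap, "mck4",
					      ARRAY_SIZE(mckx_parents), mckx_parents,
					      mckx_mux_table, &mckx_lock,
					      4,	/* id, must be <= MASTER_MAX_ID */
					      true,	/* critical: never gated */
					      INT_MIN);	/* no changeable parent */
	if (IS_ERR(hw))
		return;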
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c2ab4860a2bf..7867eaf0447f 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -38,6 +38,7 @@ struct clk_sam9x5_peripheral {
u32 div;
const struct clk_pcr_layout *layout;
bool auto_div;
+ int chg_pid;
};
#define to_clk_sam9x5_peripheral(hw) \
@@ -208,7 +209,7 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
regmap_read(periph->regmap, periph->layout->offset, &status);
spin_unlock_irqrestore(periph->lock, flags);
- return status & AT91_PMC_PCR_EN ? 1 : 0;
+ return !!(status & AT91_PMC_PCR_EN);
}
static unsigned long
@@ -238,6 +239,87 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
return parent_rate >> periph->div;
}
+static void clk_sam9x5_peripheral_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate,
+ u32 shift, long *best_diff,
+ long *best_rate)
+{
+ unsigned long tmp_rate = parent_rate >> shift;
+ unsigned long tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
+static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ struct clk_rate_request req_parent = *req;
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long tmp_rate;
+ long best_rate = LONG_MIN;
+ long best_diff = LONG_MIN;
+ u32 shift;
+
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+ return parent_rate;
+
+ /* First step: check the available dividers. */
+ for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+ tmp_rate = parent_rate >> shift;
+
+ if (periph->range.max && tmp_rate > periph->range.max)
+ continue;
+
+ clk_sam9x5_peripheral_best_diff(req, parent, parent_rate,
+ shift, &best_diff, &best_rate);
+
+ if (!best_diff || best_rate <= req->rate)
+ break;
+ }
+
+ if (periph->chg_pid < 0)
+ goto end;
+
+ /* Step two: try to request rate from parent. */
+ parent = clk_hw_get_parent_by_index(hw, periph->chg_pid);
+ if (!parent)
+ goto end;
+
+ for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+ req_parent.rate = req->rate << shift;
+
+ if (__clk_determine_rate(parent, &req_parent))
+ continue;
+
+ clk_sam9x5_peripheral_best_diff(req, parent, req_parent.rate,
+ shift, &best_diff, &best_rate);
+
+ if (!best_diff)
+ break;
+ }
+end:
+ if (best_rate < 0 ||
+ (periph->range.max && best_rate > periph->range.max))
+ return -EINVAL;
+
+ pr_debug("PCK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
@@ -320,11 +402,21 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.set_rate = clk_sam9x5_peripheral_set_rate,
};
+static const struct clk_ops sam9x5_peripheral_chg_ops = {
+ .enable = clk_sam9x5_peripheral_enable,
+ .disable = clk_sam9x5_peripheral_disable,
+ .is_enabled = clk_sam9x5_peripheral_is_enabled,
+ .recalc_rate = clk_sam9x5_peripheral_recalc_rate,
+ .determine_rate = clk_sam9x5_peripheral_determine_rate,
+ .set_rate = clk_sam9x5_peripheral_set_rate,
+};
+
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
- u32 id, const struct clk_range *range)
+ u32 id, const struct clk_range *range,
+ int chg_pid)
{
struct clk_sam9x5_peripheral *periph;
struct clk_init_data init;
@@ -339,10 +431,16 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &sam9x5_peripheral_ops;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ if (chg_pid < 0) {
+ init.flags = 0;
+ init.ops = &sam9x5_peripheral_ops;
+ } else {
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+ init.ops = &sam9x5_peripheral_chg_ops;
+ }
periph->id = id;
periph->hw.init = &init;
@@ -353,6 +451,7 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
periph->auto_div = true;
periph->layout = layout;
periph->range = *range;
+ periph->chg_pid = chg_pid;
hw = &periph->hw;
ret = clk_hw_register(NULL, &periph->hw);
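The added chg_pid argument names the parent index whose rate determine_rate() is allowed to change; a negative value (INT_MIN throughout this series) keeps the previous fixed-parent behaviour. A minimal sketch, with placeholder clock names and IDs:

	/* No rate propagation: behaves exactly like the old API. */
	hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
						 &dt_pcr_layout, "foo_clk",
						 "masterck", 12, &range, INT_MIN);

	/* Parent index 0 may be re-rated to better match the requested rate. */
	hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
						 &dt_pcr_layout, "bar_clk",
						 "masterck", 13, &range, 0);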
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 8ee66fbee3d9..fcf8f6a1c2c6 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -21,6 +21,7 @@
struct clk_programmable {
struct clk_hw hw;
struct regmap *regmap;
+ u32 *mux_table;
u8 id;
const struct clk_programmable_layout *layout;
};
@@ -108,6 +109,9 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
+ if (prog->mux_table)
+ pckr = clk_mux_index_to_val(prog->mux_table, 0, index);
+
if (index > layout->css_mask) {
if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
return -EINVAL;
@@ -134,6 +138,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw)
if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
ret = PROG_MAX_RM9200_CSS + 1;
+ if (prog->mux_table)
+ ret = clk_mux_val_to_index(&prog->hw, prog->mux_table, 0, ret);
+
return ret;
}
@@ -182,7 +189,8 @@ struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
- const struct clk_programmable_layout *layout)
+ const struct clk_programmable_layout *layout,
+ u32 *mux_table)
{
struct clk_programmable *prog;
struct clk_hw *hw;
@@ -206,6 +214,7 @@ at91_clk_register_programmable(struct regmap *regmap,
prog->layout = layout;
prog->hw.init = &init;
prog->regmap = regmap;
+ prog->mux_table = mux_table;
hw = &prog->hw;
ret = clk_hw_register(NULL, &prog->hw);
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index e699803986e5..b473298ef7e6 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -15,26 +15,41 @@
#include "pmc.h"
#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
-#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
+#define PMC_PLL_CTRL1_MUL_MSK GENMASK(31, 24)
+#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
#define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define PLL_MAX_ID 1
+#define FCORE_MIN (600000000)
+#define FCORE_MAX (1200000000)
-struct sam9x60_pll {
- struct clk_hw hw;
+#define PLL_MAX_ID 7
+
+struct sam9x60_pll_core {
struct regmap *regmap;
spinlock_t *lock;
const struct clk_pll_characteristics *characteristics;
- u32 frac;
+ const struct clk_pll_layout *layout;
+ struct clk_hw hw;
u8 id;
- u8 div;
+};
+
+struct sam9x60_frac {
+ struct sam9x60_pll_core core;
+ u32 frac;
u16 mul;
};
-#define to_sam9x60_pll(hw) container_of(hw, struct sam9x60_pll, hw)
+struct sam9x60_div {
+ struct sam9x60_pll_core core;
+ u8 div;
+};
+
+#define to_sam9x60_pll_core(hw) container_of(hw, struct sam9x60_pll_core, hw)
+#define to_sam9x60_frac(core) container_of(core, struct sam9x60_frac, core)
+#define to_sam9x60_div(core) container_of(core, struct sam9x60_div, core)
static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
{
@@ -45,41 +60,53 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
return !!(status & BIT(id));
}
-static int sam9x60_pll_prepare(struct clk_hw *hw)
+static bool sam9x60_frac_pll_ready(struct regmap *regmap, u8 id)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
- struct regmap *regmap = pll->regmap;
- unsigned long flags;
- u8 div;
- u16 mul;
- u32 val;
+ return sam9x60_pll_ready(regmap, id);
+}
- spin_lock_irqsave(pll->lock, flags);
- regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id);
+static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
- regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
- div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
+ return (parent_rate * (frac->mul + 1) +
+ ((u64)parent_rate * frac->frac >> 22));
+}
+static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ struct regmap *regmap = core->regmap;
+ unsigned int val, cfrac, cmul;
+ unsigned long flags;
+
+ spin_lock_irqsave(core->lock, flags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
- mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+ cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
+ cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
- if (sam9x60_pll_ready(regmap, pll->id) &&
- (div == pll->div && mul == pll->mul)) {
- spin_unlock_irqrestore(pll->lock, flags);
- return 0;
- }
+ if (sam9x60_frac_pll_ready(regmap, core->id) &&
+ (cmul == frac->mul && cfrac == frac->frac))
+ goto unlock;
- /* Recommended value for AT91_PMC_PLL_ACR */
- if (pll->characteristics->upll)
+ /* Recommended value for PMC_PLL_ACR */
+ if (core->characteristics->upll)
val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
else
val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
- FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
+ (frac->mul << core->layout->mul_shift) |
+ (frac->frac << core->layout->frac_shift));
- if (pll->characteristics->upll) {
+ if (core->characteristics->upll) {
/* Enable the UTMI internal bandgap */
val |= AT91_PMC_PLL_ACR_UTMIBG;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
@@ -94,221 +121,409 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
}
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
- regmap_write(regmap, AT91_PMC_PLL_CTRL0,
- AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL |
- AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div);
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
- while (!sam9x60_pll_ready(regmap, pll->id))
+ while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
- spin_unlock_irqrestore(pll->lock, flags);
+unlock:
+ spin_unlock_irqrestore(core->lock, flags);
return 0;
}
-static int sam9x60_pll_is_prepared(struct clk_hw *hw)
+static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct regmap *regmap = core->regmap;
+ unsigned long flags;
+
+ spin_lock_irqsave(core->lock, flags);
- return sam9x60_pll_ready(pll->regmap, pll->id);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
+
+ if (core->characteristics->upll)
+ regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
+ AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ spin_unlock_irqrestore(core->lock, flags);
}
-static void sam9x60_pll_unprepare(struct clk_hw *hw)
+static int sam9x60_frac_pll_is_prepared(struct clk_hw *hw)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
- unsigned long flags;
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- spin_lock_irqsave(pll->lock, flags);
+ return sam9x60_pll_ready(core->regmap, core->id);
+}
- regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id);
+static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
+ unsigned long rate,
+ unsigned long parent_rate,
+ bool update)
+{
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ unsigned long tmprate, remainder;
+ unsigned long nmul = 0;
+ unsigned long nfrac = 0;
- regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
- AT91_PMC_PLL_CTRL0_ENPLLCK, 0);
+ if (rate < FCORE_MIN || rate > FCORE_MAX)
+ return -ERANGE;
- regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+ /*
+ * Calculate the multiplier and fractional part that provide
+ * the closest rate to the requested one.
+ */
+ nmul = mult_frac(rate, 1, parent_rate);
+ tmprate = mult_frac(parent_rate, nmul, 1);
+ remainder = rate - tmprate;
- regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
- AT91_PMC_PLL_CTRL0_ENPLL, 0);
+ if (remainder) {
+ nfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * (1 << 22),
+ parent_rate);
- if (pll->characteristics->upll)
- regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR,
- AT91_PMC_PLL_ACR_UTMIBG |
- AT91_PMC_PLL_ACR_UTMIVR, 0);
+ tmprate += DIV_ROUND_CLOSEST_ULL((u64)nfrac * parent_rate,
+ (1 << 22));
+ }
- regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+ /* Check if the resulting rate is valid. */
+ if (tmprate < FCORE_MIN || tmprate > FCORE_MAX)
+ return -ERANGE;
- spin_unlock_irqrestore(pll->lock, flags);
+ if (update) {
+ frac->mul = nmul - 1;
+ frac->frac = nfrac;
+ }
+
+ return tmprate;
}
-static unsigned long sam9x60_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return (parent_rate * (pll->mul + 1)) / (pll->div + 1);
+ return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
}
-static long sam9x60_pll_get_best_div_mul(struct sam9x60_pll *pll,
- unsigned long rate,
- unsigned long parent_rate,
- bool update)
+static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- const struct clk_pll_characteristics *characteristics =
- pll->characteristics;
- unsigned long bestremainder = ULONG_MAX;
- unsigned long maxdiv, mindiv, tmpdiv;
- long bestrate = -ERANGE;
- unsigned long bestdiv = 0;
- unsigned long bestmul = 0;
- unsigned long bestfrac = 0;
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- if (rate < characteristics->output[0].min ||
- rate > characteristics->output[0].max)
- return -ERANGE;
+ return sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
+}
- if (!pll->characteristics->upll) {
- mindiv = parent_rate / rate;
- if (mindiv < 2)
- mindiv = 2;
+static const struct clk_ops sam9x60_frac_pll_ops = {
+ .prepare = sam9x60_frac_pll_prepare,
+ .unprepare = sam9x60_frac_pll_unprepare,
+ .is_prepared = sam9x60_frac_pll_is_prepared,
+ .recalc_rate = sam9x60_frac_pll_recalc_rate,
+ .round_rate = sam9x60_frac_pll_round_rate,
+ .set_rate = sam9x60_frac_pll_set_rate,
+};
- maxdiv = DIV_ROUND_UP(parent_rate * PLL_MUL_MAX, rate);
- if (maxdiv > PLL_DIV_MAX)
- maxdiv = PLL_DIV_MAX;
- } else {
- mindiv = maxdiv = UPLL_DIV;
- }
+static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+ struct regmap *regmap = core->regmap;
+ unsigned long flags;
+ unsigned int val, cdiv;
- for (tmpdiv = mindiv; tmpdiv <= maxdiv; tmpdiv++) {
- unsigned long remainder;
- unsigned long tmprate;
- unsigned long tmpmul;
- unsigned long tmpfrac = 0;
+ spin_lock_irqsave(core->lock, flags);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
- /*
- * Calculate the multiplier associated with the current
- * divider that provide the closest rate to the requested one.
- */
- tmpmul = mult_frac(rate, tmpdiv, parent_rate);
- tmprate = mult_frac(parent_rate, tmpmul, tmpdiv);
- remainder = rate - tmprate;
+ /* Stop if enabled and nothing changed. */
+ if (!!(val & core->layout->endiv_mask) && cdiv == div->div)
+ goto unlock;
- if (remainder) {
- tmpfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * tmpdiv * (1 << 22),
- parent_rate);
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->div_mask | core->layout->endiv_mask,
+ (div->div << core->layout->div_shift) |
+ (1 << core->layout->endiv_shift));
- tmprate += DIV_ROUND_CLOSEST_ULL((u64)tmpfrac * parent_rate,
- tmpdiv * (1 << 22));
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
- if (tmprate > rate)
- remainder = tmprate - rate;
- else
- remainder = rate - tmprate;
- }
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
- /*
- * Compare the remainder with the best remainder found until
- * now and elect a new best multiplier/divider pair if the
- * current remainder is smaller than the best one.
- */
- if (remainder < bestremainder) {
- bestremainder = remainder;
- bestdiv = tmpdiv;
- bestmul = tmpmul;
- bestrate = tmprate;
- bestfrac = tmpfrac;
+unlock:
+ spin_unlock_irqrestore(core->lock, flags);
+
+ return 0;
+}
+
+static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct regmap *regmap = core->regmap;
+ unsigned long flags;
+
+ spin_lock_irqsave(core->lock, flags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->endiv_mask, 0);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ spin_unlock_irqrestore(core->lock, flags);
+}
+
+static int sam9x60_div_pll_is_prepared(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct regmap *regmap = core->regmap;
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(core->lock, flags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+
+ spin_unlock_irqrestore(core->lock, flags);
+
+ return !!(val & core->layout->endiv_mask);
+}
+
+static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1));
+}
+
+static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
+ unsigned long *parent_rate,
+ unsigned long rate)
+{
+ const struct clk_pll_characteristics *characteristics =
+ core->characteristics;
+ struct clk_hw *parent = clk_hw_get_parent(&core->hw);
+ unsigned long tmp_rate, tmp_parent_rate, tmp_diff;
+ long best_diff = -1, best_rate = -EINVAL;
+ u32 divid, best_div;
+
+ if (!rate)
+ return 0;
+
+ if (rate < characteristics->output[0].min ||
+ rate > characteristics->output[0].max)
+ return -ERANGE;
+
+ for (divid = 1; divid < core->layout->div_mask; divid++) {
+ tmp_parent_rate = clk_hw_round_rate(parent, rate * divid);
+ if (!tmp_parent_rate)
+ continue;
+
+ tmp_rate = DIV_ROUND_CLOSEST_ULL(tmp_parent_rate, divid);
+ tmp_diff = abs(rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ *parent_rate = tmp_parent_rate;
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ best_div = divid;
}
- /* We've found a perfect match! */
- if (!remainder)
+ if (!best_diff)
break;
}
- /* Check if bestrate is a valid output rate */
- if (bestrate < characteristics->output[0].min &&
- bestrate > characteristics->output[0].max)
+ if (best_rate < characteristics->output[0].min ||
+ best_rate > characteristics->output[0].max)
return -ERANGE;
- if (update) {
- pll->div = bestdiv - 1;
- pll->mul = bestmul - 1;
- pll->frac = bestfrac;
- }
-
- return bestrate;
+ return best_rate;
}
-static long sam9x60_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_pll_get_best_div_mul(pll, rate, *parent_rate, false);
+ return sam9x60_div_pll_compute_div(core, parent_rate, rate);
}
-static int sam9x60_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
- return sam9x60_pll_get_best_div_mul(pll, rate, parent_rate, true);
+ return 0;
}
-static const struct clk_ops pll_ops = {
- .prepare = sam9x60_pll_prepare,
- .unprepare = sam9x60_pll_unprepare,
- .is_prepared = sam9x60_pll_is_prepared,
- .recalc_rate = sam9x60_pll_recalc_rate,
- .round_rate = sam9x60_pll_round_rate,
- .set_rate = sam9x60_pll_set_rate,
+static const struct clk_ops sam9x60_div_pll_ops = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_div_pll_recalc_rate,
+ .round_rate = sam9x60_div_pll_round_rate,
+ .set_rate = sam9x60_div_pll_set_rate,
};
struct clk_hw * __init
-sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
- const char *name, const char *parent_name, u8 id,
- const struct clk_pll_characteristics *characteristics)
+sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw, u8 id,
+ const struct clk_pll_characteristics *characteristics,
+ const struct clk_pll_layout *layout, bool critical)
{
- struct sam9x60_pll *pll;
+ struct sam9x60_frac *frac;
struct clk_hw *hw;
struct clk_init_data init;
- unsigned int pllr;
+ unsigned long parent_rate, flags;
+ unsigned int val;
int ret;
- if (id > PLL_MAX_ID)
+ if (id > PLL_MAX_ID || !lock || !parent_hw)
return ERR_PTR(-EINVAL);
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll)
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac)
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &pll_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.ops = &sam9x60_frac_pll_ops;
init.flags = CLK_SET_RATE_GATE;
+ if (critical)
+ init.flags |= CLK_IS_CRITICAL;
+
+ frac->core.id = id;
+ frac->core.hw.init = &init;
+ frac->core.characteristics = characteristics;
+ frac->core.layout = layout;
+ frac->core.regmap = regmap;
+ frac->core.lock = lock;
+
+ spin_lock_irqsave(frac->core.lock, flags);
+ if (sam9x60_pll_ready(regmap, id)) {
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
+ frac->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+ frac->frac = FIELD_GET(PMC_PLL_CTRL1_FRACR_MSK, val);
+ } else {
+ /*
+ * This means the PLL is not set up by the bootloader. In this
+ * case we need to set a minimum rate for it, otherwise a child
+ * clock of this PLL may be enabled before its rate is set,
+ * enabling the PLL at an unsupported rate and leaving it
+ * unable to lock.
+ */
+ parent_rate = clk_hw_get_rate(parent_hw);
+ if (!parent_rate) {
+ hw = ERR_PTR(-EINVAL);
+ goto free;
+ }
+
+ ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ parent_rate, true);
+ if (ret <= 0) {
+ hw = ERR_PTR(ret);
+ goto free;
+ }
+ }
+ spin_unlock_irqrestore(frac->core.lock, flags);
+
+ hw = &frac->core.hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(frac);
+ hw = ERR_PTR(ret);
+ }
- pll->id = id;
- pll->hw.init = &init;
- pll->characteristics = characteristics;
- pll->regmap = regmap;
- pll->lock = lock;
+ return hw;
+
+free:
+ spin_unlock_irqrestore(frac->core.lock, flags);
+ kfree(frac);
+ return hw;
+}
+
+struct clk_hw * __init
+sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics,
+ const struct clk_pll_layout *layout, bool critical)
+{
+ struct sam9x60_div *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ unsigned long flags;
+ unsigned int val;
+ int ret;
+
+ if (id > PLL_MAX_ID || !lock)
+ return ERR_PTR(-EINVAL);
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.ops = &sam9x60_div_pll_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+ if (critical)
+ init.flags |= CLK_IS_CRITICAL;
+
+ div->core.id = id;
+ div->core.hw.init = &init;
+ div->core.characteristics = characteristics;
+ div->core.layout = layout;
+ div->core.regmap = regmap;
+ div->core.lock = lock;
+
+ spin_lock_irqsave(div->core.lock, flags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ div->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
- regmap_write(regmap, AT91_PMC_PLL_UPDT, id);
- regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr);
- pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
- regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr);
- pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
+ spin_unlock_irqrestore(div->core.lock, flags);
- hw = &pll->hw;
+ hw = &div->core.hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
- kfree(pll);
+ kfree(div);
hw = ERR_PTR(ret);
}
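The fractional PLL path above implements fout = fparent * (MUL + 1) + fparent * FRAC / 2^22, with the core frequency clamped to the FCORE_MIN..FCORE_MAX window. A numeric sanity check with illustrative field values (not taken from a datasheet):

	/* 24 MHz reference, MUL = 49, FRAC = 0 -> 24 MHz * 50 = 1200 MHz (FCORE_MAX).
	 * With MUL = 24 and FRAC = 1 << 21 the fractional part adds 12 MHz,
	 * giving 24 MHz * 25 + 12 MHz = 612 MHz, still inside the window. */
	u64 fparent = 24000000;
	u64 mul = 49, frac = 0;
	u64 fout = fparent * (mul + 1) + ((fparent * frac) >> 22);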
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index c4b3877aa445..f83ec0de86c3 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -34,7 +34,7 @@ static inline bool clk_system_ready(struct regmap *regmap, int id)
regmap_read(regmap, AT91_PMC_SR, &status);
- return status & (1 << id) ? 1 : 0;
+ return !!(status & (1 << id));
}
static int clk_system_prepare(struct clk_hw *hw)
@@ -74,7 +74,7 @@ static int clk_system_is_prepared(struct clk_hw *hw)
regmap_read(sys->regmap, AT91_PMC_SR, &status);
- return status & (1 << sys->id) ? 1 : 0;
+ return !!(status & (1 << sys->id));
}
static const struct clk_ops system_ops = {
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index f1ef4e1f41a9..df9f3fc3b6a6 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -120,9 +120,11 @@ static const struct clk_ops utmi_ops = {
.recalc_rate = clk_utmi_recalc_rate,
};
-struct clk_hw * __init
-at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
- const char *name, const char *parent_name)
+static struct clk_hw * __init
+at91_clk_register_utmi_internal(struct regmap *regmap_pmc,
+ struct regmap *regmap_sfr,
+ const char *name, const char *parent_name,
+ const struct clk_ops *ops, unsigned long flags)
{
struct clk_utmi *utmi;
struct clk_hw *hw;
@@ -134,10 +136,10 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &utmi_ops;
+ init.ops = ops;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
- init.flags = CLK_SET_RATE_GATE;
+ init.flags = flags;
utmi->hw.init = &init;
utmi->regmap_pmc = regmap_pmc;
@@ -152,3 +154,94 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
return hw;
}
+
+struct clk_hw * __init
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
+ const char *name, const char *parent_name)
+{
+ return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name,
+ parent_name, &utmi_ops, CLK_SET_RATE_GATE);
+}
+
+static int clk_utmi_sama7g5_prepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct clk_hw *hw_parent;
+ unsigned long parent_rate;
+ unsigned int val;
+
+ hw_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(hw_parent);
+
+ switch (parent_rate) {
+ case 16000000:
+ val = 0;
+ break;
+ case 20000000:
+ val = 2;
+ break;
+ case 24000000:
+ val = 3;
+ break;
+ case 32000000:
+ val = 5;
+ break;
+ default:
+ pr_err("UTMICK: unsupported main_xtal rate\n");
+ return -EINVAL;
+ }
+
+ regmap_write(utmi->regmap_pmc, AT91_PMC_XTALF, val);
+
+ return 0;
+
+}
+
+static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct clk_hw *hw_parent;
+ unsigned long parent_rate;
+ unsigned int val;
+
+ hw_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(hw_parent);
+
+ regmap_read(utmi->regmap_pmc, AT91_PMC_XTALF, &val);
+ switch (val & 0x7) {
+ case 0:
+ if (parent_rate == 16000000)
+ return 1;
+ break;
+ case 2:
+ if (parent_rate == 20000000)
+ return 1;
+ break;
+ case 3:
+ if (parent_rate == 24000000)
+ return 1;
+ break;
+ case 5:
+ if (parent_rate == 32000000)
+ return 1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops sama7g5_utmi_ops = {
+ .prepare = clk_utmi_sama7g5_prepare,
+ .is_prepared = clk_utmi_sama7g5_is_prepared,
+ .recalc_rate = clk_utmi_recalc_rate,
+};
+
+struct clk_hw * __init
+at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name,
+ const char *parent_name)
+{
+ return at91_clk_register_utmi_internal(regmap_pmc, NULL, name,
+ parent_name, &sama7g5_utmi_ops, 0);
+}
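For reference, the crystal-rate to XTALF encoding enforced by the two SAMA7G5 UTMI callbacks above, restated as a table (a sketch only; the array name is a placeholder and the driver itself keeps the switch statements):

	/* main_xtal rate -> AT91_PMC_XTALF value; any other rate is rejected
	 * by clk_utmi_sama7g5_prepare() with -EINVAL. */
	static const struct {
		unsigned long rate;
		unsigned int xtalf;
	} sama7g5_utmi_xtalf[] = {
		{ 16000000, 0 },
		{ 20000000, 2 },
		{ 24000000, 3 },
		{ 32000000, 5 },
	};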
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index aa1754eac59f..a50084de97d4 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -22,6 +22,8 @@
#define SYSTEM_MAX_ID 31
+#define GCK_INDEX_DT_AUDIO_PLL 5
+
#ifdef CONFIG_HAVE_AT91_AUDIO_PLL
static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
{
@@ -135,7 +137,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
return;
for_each_child_of_node(np, gcknp) {
- bool pll_audio = false;
+ int chg_pid = INT_MIN;
if (of_property_read_u32(gcknp, "reg", &id))
continue;
@@ -152,12 +154,13 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
(id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
id == GCK_ID_CLASSD))
- pll_audio = true;
+ chg_pid = GCK_INDEX_DT_AUDIO_PLL;
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&dt_pcr_layout, name,
- parent_names, num_parents,
- id, pll_audio, &range);
+ parent_names, NULL,
+ num_parents, id, &range,
+ chg_pid);
if (IS_ERR(hw))
continue;
@@ -460,7 +463,8 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
&dt_pcr_layout,
name,
parent_name,
- id, &range);
+ id, &range,
+ INT_MIN);
}
if (IS_ERR(hw))
@@ -673,7 +677,8 @@ CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
static void __init
of_at91_clk_prog_setup(struct device_node *np,
- const struct clk_programmable_layout *layout)
+ const struct clk_programmable_layout *layout,
+ u32 *mux_table)
{
int num;
u32 id;
@@ -707,7 +712,7 @@ of_at91_clk_prog_setup(struct device_node *np,
hw = at91_clk_register_programmable(regmap, name,
parent_names, num_parents,
- id, layout);
+ id, layout, mux_table);
if (IS_ERR(hw))
continue;
@@ -717,21 +722,21 @@ of_at91_clk_prog_setup(struct device_node *np,
static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout, NULL);
}
CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
of_at91rm9200_clk_prog_setup);
static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout, NULL);
}
CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
of_at91sam9g45_clk_prog_setup);
static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout, NULL);
}
CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
of_at91sam9x5_clk_prog_setup);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index df616f2937e7..7b86affc6d7c 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -54,8 +54,14 @@ struct clk_master_characteristics {
struct clk_pll_layout {
u32 pllr_mask;
- u16 mul_mask;
+ u32 mul_mask;
+ u32 frac_mask;
+ u32 div_mask;
+ u32 endiv_mask;
u8 mul_shift;
+ u8 frac_shift;
+ u8 div_shift;
+ u8 endiv_shift;
};
extern const struct clk_pll_layout at91rm9200_pll_layout;
@@ -122,8 +128,8 @@ struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
- u8 num_parents, u8 id, bool pll_audio,
- const struct clk_range *range);
+ u32 *mux_table, u8 num_parents, u8 id,
+ const struct clk_range *range, int chg_pid);
struct clk_hw * __init
at91_clk_register_h32mx(struct regmap *regmap, const char *name,
@@ -155,13 +161,21 @@ at91_clk_register_master(struct regmap *regmap, const char *name,
const struct clk_master_characteristics *characteristics);
struct clk_hw * __init
+at91_clk_sama7g5_register_master(struct regmap *regmap,
+ const char *name, int num_parents,
+ const char **parent_names, u32 *mux_table,
+ spinlock_t *lock, u8 id, bool critical,
+ int chg_pid);
+
+struct clk_hw * __init
at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id);
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
- u32 id, const struct clk_range *range);
+ u32 id, const struct clk_range *range,
+ int chg_pid);
struct clk_hw * __init
at91_clk_register_pll(struct regmap *regmap, const char *name,
@@ -173,14 +187,23 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name);
struct clk_hw * __init
-sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
- const char *name, const char *parent_name, u8 id,
- const struct clk_pll_characteristics *characteristics);
+sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics,
+ const struct clk_pll_layout *layout, bool critical);
+
+struct clk_hw * __init
+sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw, u8 id,
+ const struct clk_pll_characteristics *characteristics,
+ const struct clk_pll_layout *layout, bool critical);
struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents, u8 id,
- const struct clk_programmable_layout *layout);
+ const struct clk_programmable_layout *layout,
+ u32 *mux_table);
struct clk_hw * __init
at91_clk_register_sam9260_slow(struct regmap *regmap,
@@ -213,6 +236,10 @@ struct clk_hw * __init
at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
const char *name, const char *parent_name);
+struct clk_hw * __init
+at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
#ifdef CONFIG_PM
void pmc_register_id(u8 id);
void pmc_register_pck(u8 pck);
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 3e20aa68259f..ab6318c0589e 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -22,7 +22,7 @@ static const struct clk_master_layout sam9x60_master_layout = {
};
static const struct clk_range plla_outputs[] = {
- { .min = 300000000, .max = 600000000 },
+ { .min = 2343750, .max = 1200000000 },
};
static const struct clk_pll_characteristics plla_characteristics = {
@@ -42,6 +42,20 @@ static const struct clk_pll_characteristics upll_characteristics = {
.upll = true,
};
+static const struct clk_pll_layout pll_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+static const struct clk_pll_layout pll_div_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
static const struct clk_programmable_layout sam9x60_programmable_layout = {
.pres_mask = 0xff,
.pres_shift = 8,
@@ -156,6 +170,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
const char *td_slck_name, *md_slck_name, *mainxtal_name;
struct pmc_data *sam9x60_pmc;
const char *parent_names[6];
+ struct clk_hw *main_osc_hw;
struct regmap *regmap;
struct clk_hw *hw;
int i;
@@ -178,7 +193,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
@@ -189,7 +204,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (!sam9x60_pmc)
return;
- hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 24000000,
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
50000000);
if (IS_ERR(hw))
goto err_free;
@@ -200,6 +215,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
bypass);
if (IS_ERR(hw))
goto err_free;
+ main_osc_hw = hw;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
@@ -209,15 +225,31 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_pmc->chws[PMC_MAIN] = hw;
- hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "pllack",
- "mainck", 0, &plla_characteristics);
+ hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "pllack_fracck",
+ "mainck", sam9x60_pmc->chws[PMC_MAIN],
+ 0, &plla_characteristics,
+ &pll_frac_layout, true);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck",
+ "pllack_fracck", 0, &plla_characteristics,
+ &pll_div_layout, true);
if (IS_ERR(hw))
goto err_free;
sam9x60_pmc->chws[PMC_PLLACK] = hw;
- hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
- "main_osc", 1, &upll_characteristics);
+ hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "upllck_fracck",
+ "main_osc", main_osc_hw, 1,
+ &upll_characteristics,
+ &pll_frac_layout, false);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck",
+ "upllck_fracck", 1, &upll_characteristics,
+ &pll_div_layout, false);
if (IS_ERR(hw))
goto err_free;
@@ -225,7 +257,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = "mainck";
- parent_names[2] = "pllack";
+ parent_names[2] = "pllack_divck";
hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
&sam9x60_master_layout,
&mck_characteristics);
@@ -234,8 +266,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_pmc->chws[PMC_MCK] = hw;
- parent_names[0] = "pllack";
- parent_names[1] = "upllck";
+ parent_names[0] = "pllack_divck";
+ parent_names[1] = "upllck_divck";
parent_names[2] = "main_osc";
hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
if (IS_ERR(hw))
@@ -245,8 +277,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
parent_names[3] = "masterck";
- parent_names[4] = "pllack";
- parent_names[5] = "upllck";
+ parent_names[4] = "pllack_divck";
+ parent_names[5] = "upllck_divck";
for (i = 0; i < 8; i++) {
char name[6];
@@ -254,7 +286,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 6, i,
- &sam9x60_programmable_layout);
+ &sam9x60_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -277,7 +310,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_periphck[i].n,
"masterck",
sam9x60_periphck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
@@ -288,10 +321,9 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sam9x60_pcr_layout,
sam9x60_gck[i].n,
- parent_names, 6,
+ parent_names, NULL, 6,
sam9x60_gck[i].id,
- false,
- &sam9x60_gck[i].r);
+ &sam9x60_gck[i].r, INT_MIN);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69421d71daf..8b220762941a 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -116,21 +116,20 @@ static const struct {
char *n;
u8 id;
struct clk_range r;
- bool pll;
+ int chg_pid;
} sama5d2_gck[] = {
- { .n = "sdmmc0_gclk", .id = 31, },
- { .n = "sdmmc1_gclk", .id = 32, },
- { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, },
- { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, },
- { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
- { .n = "isc_gclk", .id = 46, },
- { .n = "pdmic_gclk", .id = 48, },
- { .n = "i2s0_gclk", .id = 54, .pll = true },
- { .n = "i2s1_gclk", .id = 55, .pll = true },
- { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, },
- { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, },
- { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 },
- .pll = true },
+ { .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
+ { .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
+ { .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "tcb1_gclk", .id = 36, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "pwm_gclk", .id = 38, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "isc_gclk", .id = 46, .chg_pid = INT_MIN, },
+ { .n = "pdmic_gclk", .id = 48, .chg_pid = INT_MIN, },
+ { .n = "i2s0_gclk", .id = 54, .chg_pid = 5, },
+ { .n = "i2s1_gclk", .id = 55, .chg_pid = 5, },
+ { .n = "can0_gclk", .id = 56, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
+ { .n = "can1_gclk", .id = 57, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
+ { .n = "classd_gclk", .id = 59, .chg_pid = 5, .r = { .min = 0, .max = 100000000 }, },
};
static const struct clk_programmable_layout sama5d2_programmable_layout = {
@@ -269,7 +268,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 6, i,
- &sama5d2_programmable_layout);
+ &sama5d2_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -292,7 +292,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
sama5d2_periphck[i].n,
"masterck",
sama5d2_periphck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
@@ -305,7 +305,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
sama5d2_periph32ck[i].n,
"h32mxck",
sama5d2_periph32ck[i].id,
- &sama5d2_periph32ck[i].r);
+ &sama5d2_periph32ck[i].r,
+ INT_MIN);
if (IS_ERR(hw))
goto err_free;
@@ -322,10 +323,10 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_gck[i].n,
- parent_names, 6,
+ parent_names, NULL, 6,
sama5d2_gck[i].id,
- sama5d2_gck[i].pll,
- &sama5d2_gck[i].r);
+ &sama5d2_gck[i].r,
+ sama5d2_gck[i].chg_pid);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 5e4e44dd4c37..7c6e0a5b9dc8 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -121,7 +121,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
@@ -200,7 +200,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91sam9x5_programmable_layout);
+ &at91sam9x5_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -223,7 +224,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
sama5d3_periphck[i].n,
"masterck",
sama5d3_periphck[i].id,
- &sama5d3_periphck[i].r);
+ &sama5d3_periphck[i].r,
+ INT_MIN);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 662ff5fa6e98..92d8d4141b43 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -223,7 +223,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
hw = at91_clk_register_programmable(regmap, name,
parent_names, 5, i,
- &at91sam9x5_programmable_layout);
+ &at91sam9x5_programmable_layout,
+ NULL);
if (IS_ERR(hw))
goto err_free;
@@ -246,7 +247,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
sama5d4_periphck[i].n,
"masterck",
sama5d4_periphck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
@@ -259,7 +260,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
sama5d4_periph32ck[i].n,
"h32mxck",
sama5d4_periph32ck[i].id,
- &range);
+ &range, INT_MIN);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
new file mode 100644
index 000000000000..0db2ab3eca14
--- /dev/null
+++ b/drivers/clk/at91/sama7g5.c
@@ -0,0 +1,1059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7G5 PMC code.
+ *
+ * Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+#define SAMA7G5_INIT_TABLE(_table, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) \
+ (_table)[_i] = _i; \
+ } while (0)
+
+#define SAMA7G5_FILL_TABLE(_to, _from, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) { \
+ (_to)[_i] = (_from)[_i]; \
+ } \
+ } while (0)
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+/**
+ * PLL clocks identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_IMG: Image subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_IMG,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_MAX,
+};
+
+/**
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/**
+ * PLL clocks description
+ * @n: clock name
+ * @p: clock parent
+ * @l: clock layout
+ * @t: clock type
+ * @c: true if clock is critical and cannot be disabled
+ * @eid: export index in sama7g5->chws[] array
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ const struct clk_pll_layout *l;
+ u8 t;
+ u8 c;
+ u8 eid;
+} sama7g5_plls[][PLL_ID_MAX] = {
+ [PLL_ID_CPU] = {
+ { .n = "cpupll_fracck",
+ .p = "mainck",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC,
+ .c = 1, },
+
+ { .n = "cpupll_divpmcck",
+ .p = "cpupll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV,
+ .c = 1, },
+ },
+
+ [PLL_ID_SYS] = {
+ { .n = "syspll_fracck",
+ .p = "mainck",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC,
+ .c = 1, },
+
+ { .n = "syspll_divpmcck",
+ .p = "syspll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV,
+ .c = 1, },
+ },
+
+ [PLL_ID_DDR] = {
+ { .n = "ddrpll_fracck",
+ .p = "mainck",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC,
+ .c = 1, },
+
+ { .n = "ddrpll_divpmcck",
+ .p = "ddrpll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV,
+ .c = 1, },
+ },
+
+ [PLL_ID_IMG] = {
+ { .n = "imgpll_fracck",
+ .p = "mainck",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC, },
+
+ { .n = "imgpll_divpmcck",
+ .p = "imgpll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV, },
+ },
+
+ [PLL_ID_BAUD] = {
+ { .n = "baudpll_fracck",
+ .p = "mainck",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC, },
+
+ { .n = "baudpll_divpmcck",
+ .p = "baudpll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV, },
+ },
+
+ [PLL_ID_AUDIO] = {
+ { .n = "audiopll_fracck",
+ .p = "main_xtal",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC, },
+
+ { .n = "audiopll_divpmcck",
+ .p = "audiopll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV,
+ .eid = PMC_I2S0_MUX, },
+
+ { .n = "audiopll_diviock",
+ .p = "audiopll_fracck",
+ .l = &pll_layout_divio,
+ .t = PLL_TYPE_DIV,
+ .eid = PMC_I2S1_MUX, },
+ },
+
+ [PLL_ID_ETH] = {
+ { .n = "ethpll_fracck",
+ .p = "main_xtal",
+ .l = &pll_layout_frac,
+ .t = PLL_TYPE_FRAC, },
+
+ { .n = "ethpll_divpmcck",
+ .p = "ethpll_fracck",
+ .l = &pll_layout_divpmc,
+ .t = PLL_TYPE_DIV, },
+ },
+};
+
+/**
+ * Master clock (MCK[1..4]) description
+ * @n: clock name
+ * @ep: extra parents names array
+ * @ep_chg_id: index in parents array that specifies the changeable
+ * parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @c: true if clock is critical and cannot be disabled
+ */
+static const struct {
+ const char *n;
+ const char *ep[4];
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 c;
+} sama7g5_mckx[] = {
+ { .n = "mck1",
+ .id = 1,
+ .ep = { "syspll_divpmcck", },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { "ddrpll_divpmcck", },
+ .ep_mux_table = { 6, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", },
+ .ep_mux_table = { 5, 6, 7, },
+ .ep_count = 3,
+ .ep_chg_id = 6, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { "syspll_divpmcck", },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+};
+
+/**
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7g5_systemck[] = {
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, };
+
+/**
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7g5_periphck[] = {
+ { .n = "pioA_clk", .p = "mck0", .id = 11, },
+ { .n = "sfr_clk", .p = "mck1", .id = 19, },
+ { .n = "hsmc_clk", .p = "mck1", .id = 21, },
+ { .n = "xdmac0_clk", .p = "mck1", .id = 22, },
+ { .n = "xdmac1_clk", .p = "mck1", .id = 23, },
+ { .n = "xdmac2_clk", .p = "mck1", .id = 24, },
+ { .n = "acc_clk", .p = "mck1", .id = 25, },
+ { .n = "aes_clk", .p = "mck1", .id = 27, },
+ { .n = "tzaesbasc_clk", .p = "mck1", .id = 28, },
+ { .n = "asrc_clk", .p = "mck1", .id = 30, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = "mck0", .id = 32, },
+ { .n = "csi_clk", .p = "mck3", .id = 33, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "csi2dc_clk", .p = "mck3", .id = 34, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "eic_clk", .p = "mck1", .id = 37, },
+ { .n = "flex0_clk", .p = "mck1", .id = 38, },
+ { .n = "flex1_clk", .p = "mck1", .id = 39, },
+ { .n = "flex2_clk", .p = "mck1", .id = 40, },
+ { .n = "flex3_clk", .p = "mck1", .id = 41, },
+ { .n = "flex4_clk", .p = "mck1", .id = 42, },
+ { .n = "flex5_clk", .p = "mck1", .id = 43, },
+ { .n = "flex6_clk", .p = "mck1", .id = 44, },
+ { .n = "flex7_clk", .p = "mck1", .id = 45, },
+ { .n = "flex8_clk", .p = "mck1", .id = 46, },
+ { .n = "flex9_clk", .p = "mck1", .id = 47, },
+ { .n = "flex10_clk", .p = "mck1", .id = 48, },
+ { .n = "flex11_clk", .p = "mck1", .id = 49, },
+ { .n = "gmac0_clk", .p = "mck1", .id = 51, },
+ { .n = "gmac1_clk", .p = "mck1", .id = 52, },
+ { .n = "icm_clk", .p = "mck1", .id = 55, },
+ { .n = "isc_clk", .p = "mck3", .id = 56, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "i2smcc0_clk", .p = "mck1", .id = 57, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = "mck1", .id = 58, .r = { .max = 200000000, }, },
+ { .n = "matrix_clk", .p = "mck1", .id = 60, },
+ { .n = "mcan0_clk", .p = "mck1", .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = "mck1", .id = 62, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = "mck1", .id = 63, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = "mck1", .id = 64, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = "mck1", .id = 65, .r = { .max = 200000000, }, },
+ { .n = "mcan5_clk", .p = "mck1", .id = 66, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = "mck1", .id = 68, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = "mck1", .id = 69, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = "mck1", .id = 70, },
+ { .n = "pit64b1_clk", .p = "mck1", .id = 71, },
+ { .n = "pit64b2_clk", .p = "mck1", .id = 72, },
+ { .n = "pit64b3_clk", .p = "mck1", .id = 73, },
+ { .n = "pit64b4_clk", .p = "mck1", .id = 74, },
+ { .n = "pit64b5_clk", .p = "mck1", .id = 75, },
+ { .n = "pwm_clk", .p = "mck1", .id = 77, },
+ { .n = "qspi0_clk", .p = "mck1", .id = 78, },
+ { .n = "qspi1_clk", .p = "mck1", .id = 79, },
+ { .n = "sdmmc0_clk", .p = "mck1", .id = 80, },
+ { .n = "sdmmc1_clk", .p = "mck1", .id = 81, },
+ { .n = "sdmmc2_clk", .p = "mck1", .id = 82, },
+ { .n = "sha_clk", .p = "mck1", .id = 83, },
+ { .n = "spdifrx_clk", .p = "mck1", .id = 84, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = "mck1", .id = 85, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = "mck1", .id = 86, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = "mck1", .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = "mck1", .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = "mck1", .id = 89, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = "mck1", .id = 90, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = "mck1", .id = 91, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = "mck1", .id = 92, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = "mck1", .id = 93, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = "mck1", .id = 94, },
+ { .n = "tcpcb_clk", .p = "mck1", .id = 95, },
+ { .n = "tdes_clk", .p = "mck1", .id = 96, },
+ { .n = "trng_clk", .p = "mck1", .id = 97, },
+ { .n = "udphsa_clk", .p = "mck1", .id = 104, },
+ { .n = "udphsb_clk", .p = "mck1", .id = 105, },
+ { .n = "uhphs_clk", .p = "mck1", .id = 106, },
+};
+
+/**
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: index in parent array of the changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7g5_gck[] = {
+ { .n = "adc_gclk",
+ .id = 26,
+ .r = { .max = 100000000, },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 7, 9, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 30,
+ .r = { .max = 200000000 },
+ .pp = { "audiopll_divpmcck", },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "csi_gclk",
+ .id = 33,
+ .r = { .max = 27000000 },
+ .pp = { "ddrpll_divpmcck", "imgpll_divpmcck", },
+ .pp_mux_table = { 6, 7, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 38,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 39,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 40,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 41,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 42,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 43,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 44,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 45,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 46,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 47,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 48,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex11_gclk",
+ .id = 49,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 51,
+ .r = { .max = 125000000 },
+ .pp = { "ethpll_divpmcck", },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 52,
+ .r = { .max = 50000000 },
+ .pp = { "ethpll_divpmcck", },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 53,
+ .r = { .max = 300000000 },
+ .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 9, 10, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 54,
+ .r = { .max = 300000000 },
+ .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 9, 10, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 57,
+ .r = { .max = 100000000 },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 58,
+ .r = { .max = 100000000 },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "mcan0_gclk",
+ .id = 61,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "mcan1_gclk",
+ .id = 62,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "mcan2_gclk",
+ .id = 63,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "mcan3_gclk",
+ .id = 64,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "mcan4_gclk",
+ .id = 65,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "mcan5_gclk",
+ .id = 66,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc0_gclk",
+ .id = 68,
+ .r = { .max = 50000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 69,
+ .r = { .max = 50000000, },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 70,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 71,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 72,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 73,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 74,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 75,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 78,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 79,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 80,
+ .r = { .max = 208000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 81,
+ .r = { .max = 208000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 82,
+ .r = { .max = 208000000 },
+ .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "spdifrx_gclk",
+ .id = 84,
+ .r = { .max = 150000000 },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "spdiftx_gclk",
+ .id = 85,
+ .r = { .max = 25000000 },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
+ .pp_count = 2,
+ .pp_chg_id = 5, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 88,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 91,
+ .r = { .max = 200000000 },
+ .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+ "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp_mux_table = { 5, 7, 8, 9, 10, },
+ .pp_count = 5,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcpca_gclk",
+ .id = 94,
+ .r = { .max = 32768, },
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcpcb_gclk",
+ .id = 95,
+ .r = { .max = 32768, },
+ .pp_chg_id = INT_MIN, },
+};
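+
+/*
+ * Example reading of the table above (derived from the entries themselves,
+ * not from additional hardware data): "i2smcc0_gclk" lists syspll_divpmcck
+ * and audiopll_divpmcck as PLL parents; together with the four common
+ * parents they land at indices 4 and 5 of the final parent array, so
+ * .pp_chg_id = 5 marks audiopll_divpmcck as the parent whose rate may be
+ * changed, while INT_MIN marks entries that never request a parent rate
+ * change.
+ */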
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 140000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7g5_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7g5_pmc_setup(struct device_node *np)
+{
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sama7g5_pmc;
+ const char *parent_names[10];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ bool bypass;
+ int i, j;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ nck(sama7g5_systemck),
+ nck(sama7g5_periphck),
+ nck(sama7g5_gck));
+ if (!sama7g5_pmc)
+ return;
+
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7g5_mckx) + ARRAY_SIZE(sama7g5_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < 3; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7g5_plls[i][j].n)
+ continue;
+
+ switch (sama7g5_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ if (!strcmp(sama7g5_plls[i][j].p, "mainck"))
+ parent_hw = sama7g5_pmc->chws[PMC_MAIN];
+ else
+ parent_hw = __clk_get_hw(of_clk_get_by_name(np,
+ sama7g5_plls[i][j].p));
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7g5_plls[i][j].n,
+ sama7g5_plls[i][j].p, parent_hw, i,
+ &pll_characteristics,
+ sama7g5_plls[i][j].l,
+ sama7g5_plls[i][j].c);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7g5_plls[i][j].n,
+ sama7g5_plls[i][j].p, i,
+ &pll_characteristics,
+ sama7g5_plls[i][j].l,
+ sama7g5_plls[i][j].c);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ if (sama7g5_plls[i][j].eid)
+ sama7g5_pmc->chws[sama7g5_plls[i][j].eid] = hw;
+ }
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "cpupll_divpmcck";
+ parent_names[3] = "syspll_divpmcck";
+ hw = at91_clk_register_master(regmap, "mck0", 4, parent_names,
+ &mck0_layout, &mck0_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "mck0";
+ for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) {
+ u8 num_parents = 4 + sama7g5_mckx[i].ep_count;
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ SAMA7G5_INIT_TABLE(mux_table, 4);
+ SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_mckx[i].ep_mux_table,
+ sama7g5_mckx[i].ep_count);
+ SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_mckx[i].ep,
+ sama7g5_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
+ num_parents, parent_names, mux_table,
+ &pmc_mckX_lock, sama7g5_mckx[i].id,
+ sama7g5_mckx[i].c,
+ sama7g5_mckx[i].ep_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+ }
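+
+ /*
+ * Illustration derived from the sama7g5_mckx[] table above (assuming
+ * SAMA7G5_INIT_TABLE() fills indices 0..3 with the values 0..3): for
+ * "mck3" this loop builds mux_table as { 0, 1, 2, 3, 5, 6, 7 } and
+ * parent_names as md_slck, td_slck, mainck, mck0, syspll_divpmcck,
+ * ddrpll_divpmcck and imgpll_divpmcck, i.e. the four common parents
+ * followed by the extra PLL parents.
+ */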
+
+ hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", "main_xtal");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "mck0";
+ parent_names[4] = "syspll_divpmcck";
+ parent_names[5] = "ddrpll_divpmcck";
+ parent_names[6] = "imgpll_divpmcck";
+ parent_names[7] = "baudpll_divpmcck";
+ parent_names[8] = "audiopll_divpmcck";
+ parent_names[9] = "ethpll_divpmcck";
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, parent_names,
+ 10, i,
+ &programmable_layout,
+ sama7g5_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7g5_systemck[i].n,
+ sama7g5_systemck[i].p,
+ sama7g5_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->shws[sama7g5_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7g5_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7g5_pcr_layout,
+ sama7g5_periphck[i].n,
+ sama7g5_periphck[i].p,
+ sama7g5_periphck[i].id,
+ &sama7g5_periphck[i].r,
+ sama7g5_periphck[i].chgp ? 0 :
+ INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->phws[sama7g5_periphck[i].id] = hw;
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "mck0";
+ for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) {
+ u8 num_parents = 4 + sama7g5_gck[i].pp_count;
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ SAMA7G5_INIT_TABLE(mux_table, 4);
+ SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_gck[i].pp_mux_table,
+ sama7g5_gck[i].pp_count);
+ SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_gck[i].pp,
+ sama7g5_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7g5_pcr_layout,
+ sama7g5_gck[i].n,
+ parent_names, mux_table,
+ num_parents,
+ sama7g5_gck[i].id,
+ &sama7g5_gck[i].r,
+ sama7g5_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->ghws[sama7g5_gck[i].id] = hw;
+ alloc_mem[alloc_mem_size++] = mux_table;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7g5_pmc);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ pmc_data_free(sama7g5_pmc);
+}
+
+/* Some clocks are used as a clocksource, hence the early registration. */
+CLK_OF_DECLARE(sama7g5_pmc, "microchip,sama7g5-pmc", sama7g5_pmc_setup);
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 15dc4cd86d76..2d65770d8665 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -471,8 +471,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
if (!regbase)
return;
- slow_rc = clk_hw_register_fixed_rate(NULL, parent_names[0], NULL, 0,
- 32768);
+ slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0],
+ NULL, 0, 32768,
+ 93750000);
if (IS_ERR(slow_rc))
return;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 027eba31f793..3439bc65bb4e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -314,6 +314,7 @@ struct bcm2835_cprman {
struct device *dev;
void __iomem *regs;
spinlock_t regs_lock; /* spinlock for all clocks */
+ unsigned int soc;
/*
* Real names of cprman clock parents looked up through
@@ -526,6 +527,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw)
A2W_PLL_CTRL_PRST_DISABLE;
}
+static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
+ const struct bcm2835_pll_data *data)
+{
+ /*
+ * On BCM2711 there isn't a pre-divisor available in the PLL feedback
+ * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed
+ * for to for VCO RANGE bits.
+ */
+ if (cprman->soc & SOC_BCM2711)
+ return 0;
+
+ return data->ana->fb_prediv_mask;
+}
+
static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
unsigned long parent_rate,
u32 *ndiv, u32 *fdiv)
@@ -583,7 +598,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
- data->ana->fb_prediv_mask;
+ bcm2835_pll_get_prediv_mask(cprman, data);
if (using_prediv) {
ndiv *= 2;
@@ -666,6 +681,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
struct bcm2835_cprman *cprman = pll->cprman;
const struct bcm2835_pll_data *data = pll->data;
+ u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
u32 ndiv, fdiv, a2w_ctl;
u32 ana[4];
@@ -683,7 +699,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
for (i = 3; i >= 0; i--)
ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
- was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
+ was_using_prediv = ana[1] & prediv_mask;
ana[0] &= ~data->ana->mask0;
ana[0] |= data->ana->set0;
@@ -693,10 +709,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
ana[3] |= data->ana->set3;
if (was_using_prediv && !use_fb_prediv) {
- ana[1] &= ~data->ana->fb_prediv_mask;
+ ana[1] &= ~prediv_mask;
do_ana_setup_first = true;
} else if (!was_using_prediv && use_fb_prediv) {
- ana[1] |= data->ana->fb_prediv_mask;
+ ana[1] |= prediv_mask;
do_ana_setup_first = false;
} else {
do_ana_setup_first = true;
@@ -2262,6 +2278,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cprman);
cprman->onecell.num = asize;
+ cprman->soc = pdata->soc;
hws = cprman->onecell.hws;
for (i = 0; i < asize; i++) {
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index 6fb8af506777..e062dd4992ea 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -119,7 +119,7 @@ static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
if (rate == *parent_rate)
return *parent_rate;
- div = DIV_ROUND_UP(*parent_rate, rate);
+ div = DIV_ROUND_CLOSEST(*parent_rate, rate);
if (div < 2)
return *parent_rate;
@@ -145,7 +145,7 @@ static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
- div = DIV_ROUND_UP(parent_rate, rate);
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
if (div < 2)
return -EINVAL;
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 87fe0b0e01a3..86f2e2d3fc02 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -89,7 +89,12 @@ static int clk_pwm_probe(struct platform_device *pdev)
}
if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
- clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period;
+ clk_pwm->fixed_rate = div64_u64(NSEC_PER_SEC, pargs.period);
+
+ if (!clk_pwm->fixed_rate) {
+ dev_err(&pdev->dev, "fixed_rate cannot be zero\n");
+ return -EINVAL;
+ }
if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 374afcab89af..5942e9874bc0 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -244,6 +244,14 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
},
};
+static const struct clockgen_muxinfo ls1021a_cmux = {
+ {
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ }
+};
+
static const struct clockgen_muxinfo ls1028a_hwa1 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
@@ -577,7 +585,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
{
.compat = "fsl,ls1021a-clockgen",
.cmux_groups = {
- &t1023_cmux
+ &ls1021a_cmux
},
.cmux_to_group = {
0, -1
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
new file mode 100644
index 000000000000..0fad0c1a0186
--- /dev/null
+++ b/drivers/clk/clk-sparx5.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip Sparx5 SoC Clock driver.
+ *
+ * Copyright (c) 2019 Microchip Inc.
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/microchip,sparx5.h>
+
+#define PLL_DIV GENMASK(7, 0)
+#define PLL_PRE_DIV GENMASK(10, 8)
+#define PLL_ROT_DIR BIT(11)
+#define PLL_ROT_SEL GENMASK(13, 12)
+#define PLL_ROT_ENA BIT(14)
+#define PLL_CLK_ENA BIT(15)
+
+#define MAX_SEL 4
+#define MAX_PRE BIT(3)
+
+static const u8 sel_rates[MAX_SEL] = { 0, 2*8, 2*4, 2*2 };
+
+static const char *clk_names[N_CLOCKS] = {
+ "core", "ddr", "cpu2", "arm2",
+ "aux1", "aux2", "aux3", "aux4",
+ "synce",
+};
+
+struct s5_hw_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+struct s5_clk_data {
+ void __iomem *base;
+ struct s5_hw_clk s5_hw[N_CLOCKS];
+};
+
+struct s5_pll_conf {
+ unsigned long freq;
+ u8 div;
+ bool rot_ena;
+ u8 rot_sel;
+ u8 rot_dir;
+ u8 pre_div;
+};
+
+#define to_s5_pll(hw) container_of(hw, struct s5_hw_clk, hw)
+
+static unsigned long s5_calc_freq(unsigned long parent_rate,
+ const struct s5_pll_conf *conf)
+{
+ unsigned long rate = parent_rate / conf->div;
+
+ if (conf->rot_ena) {
+ int sign = conf->rot_dir ? -1 : 1;
+ int divt = sel_rates[conf->rot_sel] * (1 + conf->pre_div);
+ int divb = divt + sign;
+
+ rate = mult_frac(rate, divt, divb);
+ rate = roundup(rate, 1000);
+ }
+
+ return rate;
+}
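+
+/*
+ * Worked example with illustrative values (not taken from hardware
+ * documentation): parent_rate = 2500 MHz, div = 4, rot_ena = 1,
+ * rot_dir = 0, rot_sel = 3 (sel_rates[3] = 4) and pre_div = 0 give a
+ * base rate of 2500 MHz / 4 = 625 MHz, divt = 4 * (1 + 0) = 4 and
+ * divb = 4 + 1 = 5, so the fractional output is 625 MHz * 4 / 5 =
+ * 500 MHz, which roundup() leaves unchanged as it is already a
+ * multiple of 1 kHz.
+ */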
+
+static void s5_search_fractional(unsigned long rate,
+ unsigned long parent_rate,
+ int div,
+ struct s5_pll_conf *conf)
+{
+ struct s5_pll_conf best;
+ ulong cur_offset, best_offset = rate;
+ int d, i, j;
+
+ memset(conf, 0, sizeof(*conf));
+ conf->div = div;
+ conf->rot_ena = 1; /* Fractional rate */
+
+ for (d = 0; best_offset > 0 && d <= 1; d++) {
+ conf->rot_dir = !!d;
+ for (i = 0; best_offset > 0 && i < MAX_PRE; i++) {
+ conf->pre_div = i;
+ for (j = 1; best_offset > 0 && j < MAX_SEL; j++) {
+ conf->rot_sel = j;
+ conf->freq = s5_calc_freq(parent_rate, conf);
+ cur_offset = abs(rate - conf->freq);
+ if (cur_offset < best_offset) {
+ best_offset = cur_offset;
+ best = *conf;
+ }
+ }
+ }
+ }
+
+ /* Best match */
+ *conf = best;
+}
+
+static unsigned long s5_calc_params(unsigned long rate,
+ unsigned long parent_rate,
+ struct s5_pll_conf *conf)
+{
+ if (parent_rate % rate) {
+ struct s5_pll_conf alt1, alt2;
+ int div;
+
+ div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
+ s5_search_fractional(rate, parent_rate, div, &alt1);
+
+ /* Straight match? */
+ if (alt1.freq == rate) {
+ *conf = alt1;
+ } else {
+ /* Try without rounding divider */
+ div = parent_rate / rate;
+ if (div != alt1.div) {
+ s5_search_fractional(rate, parent_rate, div,
+ &alt2);
+ /* Select the better match */
+ if (abs(rate - alt1.freq) <
+ abs(rate - alt2.freq))
+ *conf = alt1;
+ else
+ *conf = alt2;
+ } else {
+ /* Same divider either way; keep the first search result */
+ *conf = alt1;
+ }
+ }
+ } else {
+ /* Straight fit */
+ memset(conf, 0, sizeof(*conf));
+ conf->div = parent_rate / rate;
+ }
+
+ return conf->freq;
+}
+
+static int s5_pll_enable(struct clk_hw *hw)
+{
+ struct s5_hw_clk *pll = to_s5_pll(hw);
+ u32 val = readl(pll->reg);
+
+ val |= PLL_CLK_ENA;
+ writel(val, pll->reg);
+
+ return 0;
+}
+
+static void s5_pll_disable(struct clk_hw *hw)
+{
+ struct s5_hw_clk *pll = to_s5_pll(hw);
+ u32 val = readl(pll->reg);
+
+ val &= ~PLL_CLK_ENA;
+ writel(val, pll->reg);
+}
+
+static int s5_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct s5_hw_clk *pll = to_s5_pll(hw);
+ struct s5_pll_conf conf;
+ unsigned long eff_rate;
+ u32 val;
+
+ eff_rate = s5_calc_params(rate, parent_rate, &conf);
+ if (eff_rate != rate)
+ return -EOPNOTSUPP;
+
+ val = readl(pll->reg) & PLL_CLK_ENA;
+ val |= FIELD_PREP(PLL_DIV, conf.div);
+ if (conf.rot_ena) {
+ val |= PLL_ROT_ENA;
+ val |= FIELD_PREP(PLL_ROT_SEL, conf.rot_sel);
+ val |= FIELD_PREP(PLL_PRE_DIV, conf.pre_div);
+ if (conf.rot_dir)
+ val |= PLL_ROT_DIR;
+ }
+ writel(val, pll->reg);
+
+ return 0;
+}
+
+static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct s5_hw_clk *pll = to_s5_pll(hw);
+ struct s5_pll_conf conf;
+ u32 val;
+
+ val = readl(pll->reg);
+
+ if (val & PLL_CLK_ENA) {
+ conf.div = FIELD_GET(PLL_DIV, val);
+ conf.pre_div = FIELD_GET(PLL_PRE_DIV, val);
+ conf.rot_ena = FIELD_GET(PLL_ROT_ENA, val);
+ conf.rot_dir = FIELD_GET(PLL_ROT_DIR, val);
+ conf.rot_sel = FIELD_GET(PLL_ROT_SEL, val);
+
+ conf.freq = s5_calc_freq(parent_rate, &conf);
+ } else {
+ conf.freq = 0;
+ }
+
+ return conf.freq;
+}
+
+static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct s5_pll_conf conf;
+
+ return s5_calc_params(rate, *parent_rate, &conf);
+}
+
+static const struct clk_ops s5_pll_ops = {
+ .enable = s5_pll_enable,
+ .disable = s5_pll_disable,
+ .set_rate = s5_pll_set_rate,
+ .round_rate = s5_pll_round_rate,
+ .recalc_rate = s5_pll_recalc_rate,
+};
+
+static struct clk_hw *s5_clk_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct s5_clk_data *s5_clk = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= N_CLOCKS) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &s5_clk->s5_hw[idx].hw;
+}
+
+static int s5_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int i, ret;
+ struct s5_clk_data *s5_clk;
+ struct clk_parent_data pdata = { .index = 0 };
+ struct clk_init_data init = {
+ .ops = &s5_pll_ops,
+ .num_parents = 1,
+ .parent_data = &pdata,
+ };
+
+ s5_clk = devm_kzalloc(dev, sizeof(*s5_clk), GFP_KERNEL);
+ if (!s5_clk)
+ return -ENOMEM;
+
+ s5_clk->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(s5_clk->base))
+ return PTR_ERR(s5_clk->base);
+
+ for (i = 0; i < N_CLOCKS; i++) {
+ struct s5_hw_clk *s5_hw = &s5_clk->s5_hw[i];
+
+ init.name = clk_names[i];
+ s5_hw->reg = s5_clk->base + (i * 4);
+ s5_hw->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &s5_hw->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n",
+ init.name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, s5_clk_hw_get, s5_clk);
+}
+
+static const struct of_device_id s5_clk_dt_ids[] = {
+ { .compatible = "microchip,sparx5-dpll", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s5_clk_dt_ids);
+
+static struct platform_driver s5_clk_driver = {
+ .probe = s5_clk_probe,
+ .driver = {
+ .name = "sparx5-clk",
+ .of_match_table = s5_clk_dt_ids,
+ },
+};
+builtin_platform_driver(s5_clk_driver);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 9a5fb3834b9a..c90460e7ef21 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -167,6 +167,12 @@ struct vc5_hw_data {
u32 div_int;
u32 div_frc;
unsigned int num;
+};
+
+struct vc5_out_data {
+ struct clk_hw hw;
+ struct vc5_driver_data *vc5;
+ unsigned int num;
unsigned int clk_output_cfg0;
unsigned int clk_output_cfg0_mask;
};
@@ -184,7 +190,7 @@ struct vc5_driver_data {
struct clk_hw clk_pfd;
struct vc5_hw_data clk_pll;
struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM];
- struct vc5_hw_data clk_out[VC5_MAX_CLK_OUT_NUM];
+ struct vc5_out_data clk_out[VC5_MAX_CLK_OUT_NUM];
};
/*
@@ -567,7 +573,7 @@ static const struct clk_ops vc5_fod_ops = {
static int vc5_clk_out_prepare(struct clk_hw *hw)
{
- struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT |
@@ -609,7 +615,7 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
static void vc5_clk_out_unprepare(struct clk_hw *hw)
{
- struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
/* Disable the clock buffer */
@@ -619,7 +625,7 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw)
static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
{
- struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT |
@@ -649,7 +655,7 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
static int vc5_clk_out_set_parent(struct clk_hw *hw, u8 index)
{
- struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+ struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_RESET |
VC5_OUT_DIV_CONTROL_SELB_NORM |
@@ -704,7 +710,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
}
static int vc5_update_mode(struct device_node *np_output,
- struct vc5_hw_data *clk_out)
+ struct vc5_out_data *clk_out)
{
u32 value;
@@ -729,7 +735,7 @@ static int vc5_update_mode(struct device_node *np_output,
}
static int vc5_update_power(struct device_node *np_output,
- struct vc5_hw_data *clk_out)
+ struct vc5_out_data *clk_out)
{
u32 value;
@@ -754,7 +760,7 @@ static int vc5_update_power(struct device_node *np_output,
}
static int vc5_update_slew(struct device_node *np_output,
- struct vc5_hw_data *clk_out)
+ struct vc5_out_data *clk_out)
{
u32 value;
@@ -782,17 +788,20 @@ static int vc5_update_slew(struct device_node *np_output,
}
static int vc5_get_output_config(struct i2c_client *client,
- struct vc5_hw_data *clk_out)
+ struct vc5_out_data *clk_out)
{
struct device_node *np_output;
char *child_name;
int ret = 0;
child_name = kasprintf(GFP_KERNEL, "OUT%d", clk_out->num + 1);
+ if (!child_name)
+ return -ENOMEM;
+
np_output = of_get_child_by_name(client->dev.of_node, child_name);
kfree(child_name);
if (!np_output)
- goto output_done;
+ return 0;
ret = vc5_update_mode(np_output, clk_out);
if (ret)
@@ -813,7 +822,6 @@ output_error:
of_node_put(np_output);
-output_done:
return ret;
}
@@ -828,7 +836,7 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
int ret;
vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL);
- if (vc5 == NULL)
+ if (!vc5)
return -ENOMEM;
i2c_set_clientdata(client, vc5);
@@ -882,11 +890,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
init.parent_names = parent_names;
vc5->clk_mux.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_mux);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n", init.name);
- goto err_clk;
- }
if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) {
/* Register frequency doubler */
@@ -900,12 +906,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
init.num_parents = 1;
vc5->clk_mul.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n",
- init.name);
- goto err_clk;
- }
}
/* Register PFD */
@@ -921,11 +924,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
init.num_parents = 1;
vc5->clk_pfd.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n", init.name);
- goto err_clk;
- }
/* Register PLL */
memset(&init, 0, sizeof(init));
@@ -939,11 +940,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
vc5->clk_pll.vc5 = vc5;
vc5->clk_pll.hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_pll.hw);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n", init.name);
- goto err_clk;
- }
/* Register FODs */
for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) {
@@ -960,12 +959,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
vc5->clk_fod[n].vc5 = vc5;
vc5->clk_fod[n].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n",
- init.name);
- goto err_clk;
- }
}
/* Register MUX-connected OUT0_I2C_SELB output */
@@ -981,11 +977,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
vc5->clk_out[0].vc5 = vc5;
vc5->clk_out[0].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[0].hw);
- kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n", init.name);
- goto err_clk;
- }
+ if (ret)
+ goto err_clk_register;
+ kfree(init.name); /* clock framework made a copy of the name */
/* Register FOD-connected OUTx outputs */
for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) {
@@ -1008,12 +1002,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
vc5->clk_out[n].vc5 = vc5;
vc5->clk_out[n].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[n].hw);
+ if (ret)
+ goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
- if (ret) {
- dev_err(&client->dev, "unable to register %s\n",
- init.name);
- goto err_clk;
- }
/* Fetch Clock Output configuration from DT (if specified) */
ret = vc5_get_output_config(client, &vc5->clk_out[n]);
@@ -1029,6 +1020,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
+err_clk_register:
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ kfree(init.name); /* clock framework made a copy of the name */
err_clk:
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
clk_unregister_fixed_rate(vc5->pin_xin);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 236923b25543..0a9261a099bd 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -500,12 +500,6 @@ static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
return core->accuracy;
}
-unsigned long __clk_get_flags(struct clk *clk)
-{
- return !clk ? 0 : clk->core->flags;
-}
-EXPORT_SYMBOL_GPL(__clk_get_flags);
-
unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
return hw->core->flags;
@@ -3054,6 +3048,31 @@ static int clk_rate_set(void *data, u64 val)
}
#define clk_rate_mode 0644
+
+static int clk_prepare_enable_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int ret = 0;
+
+ if (val)
+ ret = clk_prepare_enable(core->hw->clk);
+ else
+ clk_disable_unprepare(core->hw->clk);
+
+ return ret;
+}
+
+static int clk_prepare_enable_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ *val = core->enable_count && core->prepare_count;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
+ clk_prepare_enable_set, "%llu\n");
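+
+/*
+ * Usage sketch (assuming CLOCK_ALLOW_WRITE_DEBUGFS is defined at build
+ * time and debugfs is mounted at /sys/kernel/debug): writing 1 to
+ * /sys/kernel/debug/clk/<clock>/clk_prepare_enable calls
+ * clk_prepare_enable() on that clock, while writing 0 calls
+ * clk_disable_unprepare().
+ */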
+
#else
#define clk_rate_set NULL
#define clk_rate_mode 0444
@@ -3231,6 +3250,10 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
debugfs_create_file("clk_duty_cycle", 0444, root, core,
&clk_duty_cycle_fops);
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+ debugfs_create_file("clk_prepare_enable", 0644, root, core,
+ &clk_prepare_enable_fops);
+#endif
if (core->num_parents > 0)
debugfs_create_file("clk_parent", 0444, root, core,
@@ -4135,6 +4158,7 @@ static int devm_clk_hw_match(struct device *dev, void *res, void *data)
/**
* devm_clk_unregister - resource managed clk_unregister()
+ * @dev: device that is unregistering the clock data
* @clk: clock to unregister
*
* Deallocate a clock allocated with devm_clk_register(). Normally
@@ -4324,6 +4348,8 @@ static void clk_core_reparent_orphans(void)
* @node: Pointer to device tree node of clock provider
* @get: Get clock callback. Returns NULL or a struct clk for the
* given clock specifier
+ * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
+ * struct clk_hw for the given clock specifier
* @data: context pointer to be passed into @get callback
*/
struct of_clk_provider {
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 8a23d5dfd1f8..6c35e4bb7940 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -651,7 +651,7 @@ static int davinci_pll_sysclk_rate_change(struct notifier_block *nb,
pllcmd = readl(pll->base + PLLCMD);
pllcmd |= PLLCMD_GOSET;
writel(pllcmd, pll->base + PLLCMD);
- /* fallthrough */
+ fallthrough;
case PRE_RATE_CHANGE:
/* Wait until for outstanding changes to take effect */
do {
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index a7db93030e02..b20cdea3e9cc 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -433,7 +433,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_USB_VF610:
pll->div_shift = 1;
- /* fall through */
+ fallthrough;
case IMX_PLLV3_USB:
ops = &clk_pllv3_ops;
pll->powerup_set = true;
@@ -441,7 +441,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
case IMX_PLLV3_AV_IMX7:
pll->num_offset = PLL_IMX7_NUM_OFFSET;
pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
- /* fall through */
+ fallthrough;
case IMX_PLLV3_AV:
ops = &clk_pllv3_av_ops;
break;
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 6c5b8029cc8a..0268d23ebe2e 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2013-2015 Imagination Technologies
* Author: Paul Burton <paul.burton@mips.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/clk-provider.h>
@@ -19,49 +20,50 @@
/* CGU register offsets */
#define CGU_REG_CLOCKCONTROL 0x00
-#define CGU_REG_LCR 0x04
-#define CGU_REG_APLL 0x10
-#define CGU_REG_MPLL 0x14
-#define CGU_REG_EPLL 0x18
-#define CGU_REG_VPLL 0x1c
-#define CGU_REG_CLKGR0 0x20
-#define CGU_REG_OPCR 0x24
-#define CGU_REG_CLKGR1 0x28
-#define CGU_REG_DDRCDR 0x2c
-#define CGU_REG_VPUCDR 0x30
-#define CGU_REG_USBPCR 0x3c
-#define CGU_REG_USBRDT 0x40
-#define CGU_REG_USBVBFIL 0x44
-#define CGU_REG_USBPCR1 0x48
-#define CGU_REG_LP0CDR 0x54
-#define CGU_REG_I2SCDR 0x60
-#define CGU_REG_LP1CDR 0x64
-#define CGU_REG_MSC0CDR 0x68
-#define CGU_REG_UHCCDR 0x6c
-#define CGU_REG_SSICDR 0x74
-#define CGU_REG_CIMCDR 0x7c
-#define CGU_REG_PCMCDR 0x84
-#define CGU_REG_GPUCDR 0x88
-#define CGU_REG_HDMICDR 0x8c
-#define CGU_REG_MSC1CDR 0xa4
-#define CGU_REG_MSC2CDR 0xa8
-#define CGU_REG_BCHCDR 0xac
-#define CGU_REG_CLOCKSTATUS 0xd4
+#define CGU_REG_LCR 0x04
+#define CGU_REG_APLL 0x10
+#define CGU_REG_MPLL 0x14
+#define CGU_REG_EPLL 0x18
+#define CGU_REG_VPLL 0x1c
+#define CGU_REG_CLKGR0 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_CLKGR1 0x28
+#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_VPUCDR 0x30
+#define CGU_REG_USBPCR 0x3c
+#define CGU_REG_USBRDT 0x40
+#define CGU_REG_USBVBFIL 0x44
+#define CGU_REG_USBPCR1 0x48
+#define CGU_REG_LP0CDR 0x54
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LP1CDR 0x64
+#define CGU_REG_MSC0CDR 0x68
+#define CGU_REG_UHCCDR 0x6c
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x7c
+#define CGU_REG_PCMCDR 0x84
+#define CGU_REG_GPUCDR 0x88
+#define CGU_REG_HDMICDR 0x8c
+#define CGU_REG_MSC1CDR 0xa4
+#define CGU_REG_MSC2CDR 0xa8
+#define CGU_REG_BCHCDR 0xac
+#define CGU_REG_CLOCKSTATUS 0xd4
/* bits within the OPCR register */
-#define OPCR_SPENDN0 BIT(7)
-#define OPCR_SPENDN1 BIT(6)
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
/* bits within the USBPCR register */
-#define USBPCR_USB_MODE BIT(31)
+#define USBPCR_USB_MODE BIT(31)
#define USBPCR_IDPULLUP_MASK (0x3 << 28)
-#define USBPCR_COMMONONN BIT(25)
-#define USBPCR_VBUSVLDEXT BIT(24)
+#define USBPCR_COMMONONN BIT(25)
+#define USBPCR_VBUSVLDEXT BIT(24)
#define USBPCR_VBUSVLDEXTSEL BIT(23)
-#define USBPCR_POR BIT(22)
-#define USBPCR_OTG_DISABLE BIT(20)
+#define USBPCR_POR BIT(22)
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
#define USBPCR_COMPDISTUNE_MASK (0x7 << 17)
-#define USBPCR_OTGTUNE_MASK (0x7 << 14)
+#define USBPCR_OTGTUNE_MASK (0x7 << 14)
#define USBPCR_SQRXTUNE_MASK (0x7 << 11)
#define USBPCR_TXFSLSTUNE_MASK (0xf << 7)
#define USBPCR_TXPREEMPHTUNE BIT(6)
@@ -78,13 +80,13 @@
#define USBPCR1_REFCLKDIV_48 (0x2 << USBPCR1_REFCLKDIV_SHIFT)
#define USBPCR1_REFCLKDIV_24 (0x1 << USBPCR1_REFCLKDIV_SHIFT)
#define USBPCR1_REFCLKDIV_12 (0x0 << USBPCR1_REFCLKDIV_SHIFT)
-#define USBPCR1_USB_SEL BIT(28)
-#define USBPCR1_WORD_IF0 BIT(19)
-#define USBPCR1_WORD_IF1 BIT(18)
+#define USBPCR1_USB_SEL BIT(28)
+#define USBPCR1_WORD_IF0 BIT(19)
+#define USBPCR1_WORD_IF1 BIT(18)
/* bits within the USBRDT register */
-#define USBRDT_VBFIL_LD_EN BIT(25)
-#define USBRDT_USBRDT_MASK 0x7fffff
+#define USBRDT_VBFIL_LD_EN BIT(25)
+#define USBRDT_USBRDT_MASK 0x7fffff
/* bits within the USBVBFIL register */
#define USBVBFIL_IDDIGFIL_SHIFT 16
@@ -92,40 +94,14 @@
#define USBVBFIL_USBVBFIL_MASK (0xffff)
/* bits within the LCR register */
-#define LCR_PD_SCPU BIT(31)
-#define LCR_SCPUS BIT(27)
+#define LCR_PD_SCPU BIT(31)
+#define LCR_SCPUS BIT(27)
/* bits within the CLKGR1 register */
-#define CLKGR1_CORE1 BIT(15)
+#define CLKGR1_CORE1 BIT(15)
static struct ingenic_cgu *cgu;
-static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw)
-{
- /* we only use CLKCORE, revisit if that ever changes */
- return 0;
-}
-
-static int jz4780_otg_phy_set_parent(struct clk_hw *hw, u8 idx)
-{
- unsigned long flags;
- u32 usbpcr1;
-
- if (idx > 0)
- return -EINVAL;
-
- spin_lock_irqsave(&cgu->lock, flags);
-
- usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
- usbpcr1 &= ~USBPCR1_REFCLKSEL_MASK;
- /* we only use CLKCORE */
- usbpcr1 |= USBPCR1_REFCLKSEL_CORE;
- writel(usbpcr1, cgu->base + CGU_REG_USBPCR1);
-
- spin_unlock_irqrestore(&cgu->lock, flags);
- return 0;
-}
-
static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -149,7 +125,6 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return 19200000;
}
- BUG();
return parent_rate;
}
@@ -206,13 +181,43 @@ static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
return 0;
}
-static const struct clk_ops jz4780_otg_phy_ops = {
- .get_parent = jz4780_otg_phy_get_parent,
- .set_parent = jz4780_otg_phy_set_parent,
+static int jz4780_otg_phy_enable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+ return 0;
+}
+static void jz4780_otg_phy_disable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ return (readl(reg_opcr) & OPCR_SPENDN0) &&
+ !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+ !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
.round_rate = jz4780_otg_phy_round_rate,
.set_rate = jz4780_otg_phy_set_rate,
+
+ .enable = jz4780_otg_phy_enable,
+ .disable = jz4780_otg_phy_disable,
+ .is_enabled = jz4780_otg_phy_is_enabled,
};
static int jz4780_core1_enable(struct clk_hw *hw)
@@ -516,6 +521,18 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.gate = { CGU_REG_CLKGR0, 1 },
},
+ [JZ4780_CLK_EXCLK_DIV512] = {
+ "exclk_div512", CGU_CLK_FIXDIV,
+ .parents = { JZ4780_CLK_EXCLK },
+ .fixdiv = { 512 },
+ },
+
+ [JZ4780_CLK_RTC] = {
+ "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+ .parents = { JZ4780_CLK_EXCLK_DIV512, JZ4780_CLK_RTCLK },
+ .mux = { CGU_REG_OPCR, 2, 1},
+ },
+
/* Gate-only clocks */
[JZ4780_CLK_NEMC] = {
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index 453f3323cb99..9aa20b52e1c3 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -48,8 +48,87 @@
#define USBPCR_SIDDQ BIT(21)
#define USBPCR_OTG_DISABLE BIT(20)
+/* bits within the USBPCR1 register */
+#define USBPCR1_REFCLKSEL_SHIFT 26
+#define USBPCR1_REFCLKSEL_MASK (0x3 << USBPCR1_REFCLKSEL_SHIFT)
+#define USBPCR1_REFCLKSEL_CORE (0x2 << USBPCR1_REFCLKSEL_SHIFT)
+#define USBPCR1_REFCLKDIV_SHIFT 24
+#define USBPCR1_REFCLKDIV_MASK (0x3 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_48 (0x2 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_24 (0x1 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_12 (0x0 << USBPCR1_REFCLKDIV_SHIFT)
+
static struct ingenic_cgu *cgu;
+static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 usbpcr1;
+ unsigned int refclk_div;
+
+ usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
+ refclk_div = usbpcr1 & USBPCR1_REFCLKDIV_MASK;
+
+ switch (refclk_div) {
+ case USBPCR1_REFCLKDIV_12:
+ return 12000000;
+
+ case USBPCR1_REFCLKDIV_24:
+ return 24000000;
+
+ case USBPCR1_REFCLKDIV_48:
+ return 48000000;
+ }
+
+ return parent_rate;
+}
+
+static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
+ unsigned long *parent_rate)
+{
+ if (req_rate < 18000000)
+ return 12000000;
+
+ if (req_rate < 36000000)
+ return 24000000;
+
+ return 48000000;
+}
+
+static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
+ unsigned long parent_rate)
+{
+ unsigned long flags;
+ u32 usbpcr1, div_bits;
+
+ switch (req_rate) {
+ case 12000000:
+ div_bits = USBPCR1_REFCLKDIV_12;
+ break;
+
+ case 24000000:
+ div_bits = USBPCR1_REFCLKDIV_24;
+ break;
+
+ case 48000000:
+ div_bits = USBPCR1_REFCLKDIV_48;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&cgu->lock, flags);
+
+ usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
+ usbpcr1 &= ~USBPCR1_REFCLKDIV_MASK;
+ usbpcr1 |= div_bits;
+ writel(usbpcr1, cgu->base + CGU_REG_USBPCR1);
+
+ spin_unlock_irqrestore(&cgu->lock, flags);
+ return 0;
+}
+
static int x1000_usb_phy_enable(struct clk_hw *hw)
{
void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
@@ -80,6 +159,10 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
}
static const struct clk_ops x1000_otg_phy_ops = {
+ .recalc_rate = x1000_otg_phy_recalc_rate,
+ .round_rate = x1000_otg_phy_round_rate,
+ .set_rate = x1000_otg_phy_set_rate,
+
.enable = x1000_usb_phy_enable,
.disable = x1000_usb_phy_disable,
.is_enabled = x1000_usb_phy_is_enabled,
@@ -144,7 +227,6 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
},
},
-
/* Custom (SoC-specific) OTG PHY */
[X1000_CLK_OTGPHY] = {
@@ -278,6 +360,19 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.mux = { CGU_REG_SSICDR, 30, 1 },
},
+ [X1000_CLK_EXCLK_DIV512] = {
+ "exclk_div512", CGU_CLK_FIXDIV,
+ .parents = { X1000_CLK_EXCLK },
+ .fixdiv = { 512 },
+ },
+
+ [X1000_CLK_RTC] = {
+ "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK_DIV512, X1000_CLK_RTCLK },
+ .mux = { CGU_REG_OPCR, 2, 1},
+ .gate = { CGU_REG_CLKGR, 27 },
+ },
+
/* Gate-only clocks */
[X1000_CLK_EMC] = {
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index a1b2ff0ee487..950aee243364 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -329,6 +329,19 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
.mux = { CGU_REG_SSICDR, 29, 1 },
},
+ [X1830_CLK_EXCLK_DIV512] = {
+ "exclk_div512", CGU_CLK_FIXDIV,
+ .parents = { X1830_CLK_EXCLK },
+ .fixdiv = { 512 },
+ },
+
+ [X1830_CLK_RTC] = {
+ "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK_DIV512, X1830_CLK_RTCLK },
+ .mux = { CGU_REG_OPCR, 2, 1},
+ .gate = { CGU_REG_CLKGR0, 29 },
+ },
+
/* Gate-only clocks */
[X1830_CLK_EMC] = {
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 8e2551ab8462..b351039cac09 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/mmp.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 7a7965141918..f254ceff3ea7 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/mmp.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 318c0adfaae1..058327310c25 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -308,6 +308,15 @@ config SC_GCC_7180
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
+config SC_LPASS_CORECC_7180
+ tristate "SC7180 LPASS Core Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the LPASS (Low Power Audio Subsystem) core clock controller
+ on SC7180 devices.
+ Say Y if you want to use the LPASS clocks and power domains provided by
+ the LPASS core clock controller.
+
config SC_GPUCC_7180
tristate "SC7180 Graphics Clock Controller"
select SC_GCC_7180
@@ -419,6 +428,22 @@ config SM_GCC_8250
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GPUCC_8150
+ tristate "SM8150 Graphics Clock Controller"
+ select SM_GCC_8150
+ help
+ Support for the graphics clock controller on SM8150 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config SM_GPUCC_8250
+ tristate "SM8250 Graphics Clock Controller"
+ select SM_GCC_8250
+ help
+ Support for the graphics clock controller on SM8250 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ae0979bebe18..9677e769e7e9 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
@@ -65,6 +66,8 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
+obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
+obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 9b2dfa08acb2..26139ef005e4 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -56,7 +56,6 @@
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
-#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[CLK_ALPHA_PLL_TYPE_DEFAULT] = {
@@ -112,22 +111,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL_U1] = 0x20,
[PLL_OFF_TEST_CTL] = 0x24,
[PLL_OFF_TEST_CTL_U] = 0x28,
- [PLL_OFF_STATUS] = 0x30,
- [PLL_OFF_OPMODE] = 0x38,
- [PLL_OFF_ALPHA_VAL] = 0x40,
- [PLL_OFF_CAL_VAL] = 0x44,
- },
- [CLK_ALPHA_PLL_TYPE_LUCID] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_CAL_L_VAL] = 0x08,
- [PLL_OFF_USER_CTL] = 0x0c,
- [PLL_OFF_USER_CTL_U] = 0x10,
- [PLL_OFF_USER_CTL_U1] = 0x14,
- [PLL_OFF_CONFIG_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL_U] = 0x1c,
- [PLL_OFF_CONFIG_CTL_U1] = 0x20,
- [PLL_OFF_TEST_CTL] = 0x24,
- [PLL_OFF_TEST_CTL_U] = 0x28,
[PLL_OFF_TEST_CTL_U1] = 0x2c,
[PLL_OFF_STATUS] = 0x30,
[PLL_OFF_OPMODE] = 0x38,
@@ -156,9 +139,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define PLL_OUT_MASK 0x7
#define PLL_RATE_MARGIN 500
+/* TRION PLL specific settings and offsets */
+#define TRION_PLL_CAL_VAL 0x44
+#define TRION_PCAL_DONE BIT(26)
+
/* LUCID PLL specific settings and offsets */
-#define LUCID_PLL_CAL_VAL 0x44
-#define LUCID_PCAL_DONE BIT(26)
+#define LUCID_PCAL_DONE BIT(27)
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
@@ -912,14 +898,14 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
-const struct clk_ops clk_trion_fixed_pll_ops = {
+const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
.enable = clk_trion_pll_enable,
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
};
-EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
static unsigned long
clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -1339,12 +1325,12 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
val << PLL_POST_DIV_SHIFT);
}
-const struct clk_ops clk_trion_pll_postdiv_ops = {
+const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
.round_rate = clk_trion_pll_postdiv_round_rate,
.set_rate = clk_trion_pll_postdiv_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_trion_pll_postdiv_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *prate)
@@ -1399,13 +1385,13 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
* @regmap: register map
* @config: configuration to apply for pll
*/
-void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
if (config->l)
regmap_write(regmap, PLL_L_VAL(pll), config->l);
- regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
if (config->alpha)
regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
@@ -1458,13 +1444,13 @@ void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
/* Place the PLL in STANDBY mode */
regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
}
-EXPORT_SYMBOL_GPL(clk_lucid_pll_configure);
+EXPORT_SYMBOL_GPL(clk_trion_pll_configure);
/*
- * The Lucid PLL requires a power-on self-calibration which happens when the
+ * The TRION PLL requires a power-on self-calibration which happens when the
* PLL comes out of reset. Calibrate in case it is not completed.
*/
-static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+static int __alpha_pll_trion_prepare(struct clk_hw *hw, u32 pcal_done)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 regval;
@@ -1472,7 +1458,7 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
/* Return early if calibration is not needed. */
regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
- if (regval & LUCID_PCAL_DONE)
+ if (regval & pcal_done)
return 0;
/* On/off to calibrate */
@@ -1483,7 +1469,17 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
return ret;
}
-static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
+static int alpha_pll_trion_prepare(struct clk_hw *hw)
+{
+ return __alpha_pll_trion_prepare(hw, TRION_PCAL_DONE);
+}
+
+static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+{
+ return __alpha_pll_trion_prepare(hw, LUCID_PCAL_DONE);
+}
+
+static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
@@ -1537,25 +1533,27 @@ static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-const struct clk_ops clk_alpha_pll_lucid_ops = {
- .prepare = alpha_pll_lucid_prepare,
+const struct clk_ops clk_alpha_pll_trion_ops = {
+ .prepare = alpha_pll_trion_prepare,
.enable = clk_trion_pll_enable,
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
- .set_rate = alpha_pll_lucid_set_rate,
+ .set_rate = alpha_pll_trion_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
-const struct clk_ops clk_alpha_pll_fixed_lucid_ops = {
+const struct clk_ops clk_alpha_pll_lucid_ops = {
+ .prepare = alpha_pll_lucid_prepare,
.enable = clk_trion_pll_enable,
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_trion_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 1ba82be93dd5..d3201b87c0cd 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -14,7 +14,7 @@ enum {
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
- CLK_ALPHA_PLL_TYPE_LUCID,
+ CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -134,18 +134,23 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops;
extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops;
+extern const struct clk_ops clk_alpha_pll_trion_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_trion_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_trion_ops;
+
extern const struct clk_ops clk_alpha_pll_lucid_ops;
-extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops;
+#define clk_alpha_pll_fixed_lucid_ops clk_alpha_pll_fixed_trion_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
-void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+#define clk_lucid_pll_configure(pll, regmap, config) \
+ clk_trion_pll_configure(pll, regmap, config)
+
-extern const struct clk_ops clk_trion_fixed_pll_ops;
-extern const struct clk_ops clk_trion_pll_postdiv_ops;
#endif
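With the Lucid type aliased to Trion and the fixed ops/configure helper reduced to defines, existing Lucid call sites keep compiling unchanged. A rough sketch of what a caller now sees (the PLL name, offset and L value below are illustrative, not taken from this patch):

	#include <linux/regmap.h>
	#include "clk-alpha-pll.h"

	static struct clk_alpha_pll example_pll = {
		.offset = 0x100,
		/* CLK_ALPHA_PLL_TYPE_LUCID now indexes the shared TRION layout */
		.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
	};

	static void example_configure(struct regmap *regmap)
	{
		static const struct alpha_pll_config example_cfg = { .l = 0x1a };

		/* expands to clk_trion_pll_configure() via the new define */
		clk_lucid_pll_configure(&example_pll, regmap, &example_cfg);
	}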
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 538677befb86..68d8f7aaf64e 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -2251,6 +2251,19 @@ static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
},
};
+static struct clk_branch gcc_lpass_cfg_noc_sway_clk = {
+ .halt_reg = 0x47018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x47018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_cfg_noc_sway_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x77004,
.pd = {
@@ -2428,6 +2441,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
[GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
+ [GCC_LPASS_CFG_NOC_SWAY_CLK] = &gcc_lpass_cfg_noc_sway_clk.clkr,
};
static const struct qcom_reset_map gcc_sc7180_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index bf5730832ef3..f0b47b7d50ca 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
.halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x8a004,
.enable_mask = BIT(0),
@@ -2402,6 +2405,7 @@ static const struct qcom_reset_map gcc_sdm660_resets[] = {
[GCC_USB_20_BCR] = { 0x2f000 },
[GCC_USB_30_BCR] = { 0xf000 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_MSS_RESTART] = { 0x79000 },
};
static const struct regmap_config gcc_sdm660_regmap_config = {
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 72524cf11048..8e9b5b3cceaf 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -34,14 +34,8 @@ enum {
P_SLEEP_CLK,
};
-static const struct pll_vco trion_vco[] = {
- { 249600000, 2000000000, 0 },
-};
-
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .vco_table = trion_vco,
- .num_vco = ARRAY_SIZE(trion_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
.clkr = {
.enable_reg = 0x52000,
@@ -53,7 +47,7 @@ static struct clk_alpha_pll gpll0 = {
.name = "bi_tcxo",
},
.num_parents = 1,
- .ops = &clk_trion_fixed_pll_ops,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
},
},
};
@@ -79,14 +73,12 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
.hw = &gpll0.clkr.hw,
},
.num_parents = 1,
- .ops = &clk_trion_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
},
};
static struct clk_alpha_pll gpll7 = {
.offset = 0x1a000,
- .vco_table = trion_vco,
- .num_vco = ARRAY_SIZE(trion_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
.clkr = {
.enable_reg = 0x52000,
@@ -98,15 +90,13 @@ static struct clk_alpha_pll gpll7 = {
.name = "bi_tcxo",
},
.num_parents = 1,
- .ops = &clk_trion_fixed_pll_ops,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
},
},
};
static struct clk_alpha_pll gpll9 = {
.offset = 0x1c000,
- .vco_table = trion_vco,
- .num_vco = ARRAY_SIZE(trion_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
.clkr = {
.enable_reg = 0x52000,
@@ -118,7 +108,7 @@ static struct clk_alpha_pll gpll9 = {
.name = "bi_tcxo",
},
.num_parents = 1,
- .ops = &clk_trion_fixed_pll_ops,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
},
},
};
@@ -1617,6 +1607,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
};
static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(15),
@@ -1632,13 +1623,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
};
static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_gpll0_div_clk_src",
.parent_hws = (const struct clk_hw *[]){
- &gcc_gpu_gpll0_clk_src.clkr.hw },
+ &gpll0_out_even.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1729,6 +1721,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
};
static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(18),
@@ -1744,13 +1737,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = {
};
static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(19),
.hw.init = &(struct clk_init_data){
.name = "gcc_npu_gpll0_div_clk_src",
.parent_hws = (const struct clk_hw *[]){
- &gcc_npu_gpll0_clk_src.clkr.hw },
+ &gpll0_out_even.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 04944f11659b..bfc4ac02f9ea 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
@@ -29,6 +30,7 @@
/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE BIT(16)
#define GDSC_POWER_DOWN_COMPLETE BIT(15)
+#define GDSC_RETAIN_FF_ENABLE BIT(11)
#define CFG_GDSCR_OFFSET 0x4
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
@@ -216,6 +218,14 @@ static inline void gdsc_assert_reset_aon(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
GMEM_RESET_MASK, 0);
}
+
+static void gdsc_retain_ff_on(struct gdsc *sc)
+{
+ u32 mask = GDSC_RETAIN_FF_ENABLE;
+
+ regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
+}
+
static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
@@ -268,6 +278,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
udelay(1);
}
+ if (sc->flags & RETAIN_FF_ENABLE)
+ gdsc_retain_ff_on(sc);
+
return 0;
}
@@ -433,3 +446,29 @@ void gdsc_unregister(struct gdsc_desc *desc)
}
of_genpd_del_provider(dev->of_node);
}
+
+/*
+ * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
+ * running in the CX domain so the CPU doesn't need to know anything about the
+ * GX domain EXCEPT....
+ *
+ * Hardware constraints dictate that the GX be powered down before the CX. If
+ * the GMU crashes it could leave the GX on. In order to successfully bring back
+ * the device the CPU needs to disable the GX headswitch. Since there is no
+ * sane way to reach in and touch that register from deep inside the GPU
+ * driver, we set up the infrastructure so that the GPU driver can make sure
+ * the GX is off in this super special case. We do this by defining a GX gdsc
+ * with a dummy enable function and a "default" disable function.
+ *
+ * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
+ * driver. During power up nothing happens from the CPU side (the GMU powers
+ * up normally), but during power down this ensures that the GX domain is
+ * *really* off - giving us a semi-standard way of doing what we need.
+ */
+int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
+{
+ /* Do nothing but give genpd the impression that we were successful */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);
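On the consumer side, the GPU driver can then attach to this domain by name and only really exercise it on the power-down/recovery path. A hedged sketch of that attach step, assuming a hypothetical GPU device pointer and a power-domains entry named "gx" (neither is defined by this patch):

	#include <linux/err.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_runtime.h>

	static struct device *example_attach_gx(struct device *gpu_dev)
	{
		struct device *gxpd = genpd_dev_pm_attach_by_name(gpu_dev, "gx");

		if (IS_ERR_OR_NULL(gxpd))
			return NULL;

		/* power-up is a no-op thanks to gdsc_gx_do_nothing_enable() */
		pm_runtime_get_sync(gxpd);
		return gxpd;
	}

	/* on crash recovery, pm_runtime_put_sync(gxpd) then really turns GX off */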
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index c36fc26dcdff..bd537438c793 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -50,6 +50,7 @@ struct gdsc {
#define AON_RESET BIT(4)
#define POLL_CFG_GDSCR BIT(5)
#define ALWAYS_ON BIT(6)
+#define RETAIN_FF_ENABLE BIT(7)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
@@ -68,6 +69,7 @@ struct gdsc_desc {
int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
struct regmap *);
void gdsc_unregister(struct gdsc_desc *desc);
+int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain);
#else
static inline int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev,
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index 7b656b6aeced..88a739b6fec3 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -170,37 +170,12 @@ static struct gdsc cx_gdsc = {
.flags = VOTABLE,
};
-/*
- * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU
- * running in the CX domain so the CPU doesn't need to know anything about the
- * GX domain EXCEPT....
- *
- * Hardware constraints dictate that the GX be powered down before the CX. If
- * the GMU crashes it could leave the GX on. In order to successfully bring back
- * the device the CPU needs to disable the GX headswitch. There being no sane
- * way to reach in and touch that register from deep inside the GPU driver we
- * need to set up the infrastructure to be able to ensure that the GPU can
- * ensure that the GX is off during this super special case. We do this by
- * defining a GX gdsc with a dummy enable function and a "default" disable
- * function.
- *
- * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
- * driver. During power up, nothing will happen from the CPU (and the GMU will
- * power up normally but during power down this will ensure that the GX domain
- * is *really* off - this gives us a semi standard way of doing what we need.
- */
-static int gx_gdsc_enable(struct generic_pm_domain *domain)
-{
- /* Do nothing but give genpd the impression that we were successful */
- return 0;
-}
-
static struct gdsc gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
.pd = {
.name = "gx_gdsc",
- .power_on = gx_gdsc_enable,
+ .power_on = gdsc_gx_do_nothing_enable,
},
.pwrsts = PWRSTS_OFF_ON,
.flags = CLAMP_IO,
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index e40efba1bf7d..5663698b306b 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -131,37 +131,12 @@ static struct gdsc gpu_cx_gdsc = {
.flags = VOTABLE,
};
-/*
- * On SDM845 the GPU GX domain is *almost* entirely controlled by the GMU
- * running in the CX domain so the CPU doesn't need to know anything about the
- * GX domain EXCEPT....
- *
- * Hardware constraints dictate that the GX be powered down before the CX. If
- * the GMU crashes it could leave the GX on. In order to successfully bring back
- * the device the CPU needs to disable the GX headswitch. There being no sane
- * way to reach in and touch that register from deep inside the GPU driver we
- * need to set up the infrastructure to be able to ensure that the GPU can
- * ensure that the GX is off during this super special case. We do this by
- * defining a GX gdsc with a dummy enable function and a "default" disable
- * function.
- *
- * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
- * driver. During power up, nothing will happen from the CPU (and the GMU will
- * power up normally but during power down this will ensure that the GX domain
- * is *really* off - this gives us a semi standard way of doing what we need.
- */
-static int gx_gdsc_enable(struct generic_pm_domain *domain)
-{
- /* Do nothing but give genpd the impression that we were successful */
- return 0;
-}
-
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
.pd = {
.name = "gpu_gx_gdsc",
- .power_on = gx_gdsc_enable,
+ .power_on = gdsc_gx_do_nothing_enable,
},
.pwrsts = PWRSTS_OFF_ON,
.flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
new file mode 100644
index 000000000000..27c40754b2c7
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000002,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8150_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8150_resets[] = {
+ [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+ [GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8150_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8150_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8150_desc = {
+ .config = &gpu_cc_sm8150_regmap_config,
+ .clks = gpu_cc_sm8150_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8150_clocks),
+ .resets = gpu_cc_sm8150_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8150_resets),
+ .gdscs = gpu_cc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8150_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8150_match_table[] = {
+ { .compatible = "qcom,sm8150-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8150_match_table);
+
+static int gpu_cc_sm8150_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8150_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8150_driver = {
+ .probe = gpu_cc_sm8150_probe,
+ .driver = {
+ .name = "sm8150-gpucc",
+ .of_match_table = gpu_cc_sm8150_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8150_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8150_driver);
+}
+subsys_initcall(gpu_cc_sm8150_init);
+
+static void __exit gpu_cc_sm8150_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8150_driver);
+}
+module_exit(gpu_cc_sm8150_exit);
+
+MODULE_DESCRIPTION("QTI GPUCC SM8150 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
new file mode 100644
index 000000000000..3fa7d1f9ff98
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK 0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xf
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x029a699c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8250_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8250_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8250_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8250_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8250_desc = {
+ .config = &gpu_cc_sm8250_regmap_config,
+ .clks = gpu_cc_sm8250_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8250_clocks),
+ .resets = gpu_cc_sm8250_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8250_resets),
+ .gdscs = gpu_cc_sm8250_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8250_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8250_match_table[] = {
+ { .compatible = "qcom,sm8250-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8250_match_table);
+
+static int gpu_cc_sm8250_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ unsigned int value, mask;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8250_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /*
+ * Configure gpu_cc_cx_gmu_clk with recommended
+ * wakeup/sleep settings
+ */
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, 0x1098, mask, value);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8250_desc, regmap);
+}
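Worked out, the register update above resolves to mask = (0xf << 8) | (0xf << 4) = 0xff0 and value = 0xff0, i.e. bits [11:4] of the CX GMU branch register at offset 0x1098 are all set.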
+
+static struct platform_driver gpu_cc_sm8250_driver = {
+ .probe = gpu_cc_sm8250_probe,
+ .driver = {
+ .name = "sm8250-gpucc",
+ .of_match_table = gpu_cc_sm8250_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8250_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8250_driver);
+}
+subsys_initcall(gpu_cc_sm8250_init);
+
+static void __exit gpu_cc_sm8250_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8250_driver);
+}
+module_exit(gpu_cc_sm8250_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8250 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
new file mode 100644
index 000000000000..d4c1864e1ee9
--- /dev/null
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
+ .l = 0x20,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000000,
+ .user_ctl_val = 0x00005105,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_CAL_L_VAL] = 0x8,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL_U1] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1C,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x20,
+ [PLL_OFF_TEST_CTL] = 0x24,
+ [PLL_OFF_TEST_CTL_U] = 0x28,
+ [PLL_OFF_STATUS] = 0x30,
+ [PLL_OFF_OPMODE] = 0x38,
+ [PLL_OFF_FRAC] = 0x40,
+ },
+};
+
+static struct clk_alpha_pll lpass_lpaaudio_dig_pll = {
+ .offset = 0x1000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_lpaaudio_dig_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table
+ post_div_table_lpass_lpaaudio_dig_pll_out_odd[] = {
+ { 0x5, 5 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_lpaaudio_dig_pll_out_odd = {
+ .offset = 0x1000,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_lpass_lpaaudio_dig_pll_out_odd,
+ .num_post_div =
+ ARRAY_SIZE(post_div_table_lpass_lpaaudio_dig_pll_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpass_lpaaudio_dig_pll_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &lpass_lpaaudio_dig_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map lpass_core_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5 },
+};
+
+static const struct clk_parent_data lpass_core_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &lpass_lpaaudio_dig_pll_out_odd.clkr.hw },
+};
+
+static const struct parent_map lpass_core_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static struct clk_rcg2 core_clk_src = {
+ .cmd_rcgr = 0x1d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "core_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ext_mclk0_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_ext_lpaif_clk_src[] = {
+ F(256000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 32),
+ F(512000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 16),
+ F(768000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 16),
+ F(1024000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 8),
+ F(1536000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 8),
+ F(2048000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 4),
+ F(3072000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 4),
+ F(4096000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 2),
+ F(6144000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 2),
+ F(8192000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(12288000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24576000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ext_mclk0_clk_src = {
+ .cmd_rcgr = 0x20000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_ext_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ext_mclk0_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpaif_pri_clk_src = {
+ .cmd_rcgr = 0x10000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_ext_lpaif_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpaif_pri_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpaif_sec_clk_src = {
+ .cmd_rcgr = 0x11000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_ext_lpaif_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpaif_sec_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch lpass_audio_core_ext_mclk0_clk = {
+ .halt_reg = 0x20014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x20014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_audio_core_ext_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ext_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_core_lpaif_pri_ibit_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x10018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_audio_core_lpaif_pri_ibit_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &lpaif_pri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_core_lpaif_sec_ibit_clk = {
+ .halt_reg = 0x11018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x11018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x11018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_audio_core_lpaif_sec_ibit_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &lpaif_sec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_core_sysnoc_mport_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x23000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_audio_core_sysnoc_mport_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *lpass_core_cc_sc7180_clocks[] = {
+ [EXT_MCLK0_CLK_SRC] = &ext_mclk0_clk_src.clkr,
+ [LPAIF_PRI_CLK_SRC] = &lpaif_pri_clk_src.clkr,
+ [LPAIF_SEC_CLK_SRC] = &lpaif_sec_clk_src.clkr,
+ [CORE_CLK_SRC] = &core_clk_src.clkr,
+ [LPASS_AUDIO_CORE_EXT_MCLK0_CLK] = &lpass_audio_core_ext_mclk0_clk.clkr,
+ [LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK] =
+ &lpass_audio_core_lpaif_pri_ibit_clk.clkr,
+ [LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK] =
+ &lpass_audio_core_lpaif_sec_ibit_clk.clkr,
+ [LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK] =
+ &lpass_audio_core_sysnoc_mport_core_clk.clkr,
+ [LPASS_LPAAUDIO_DIG_PLL] = &lpass_lpaaudio_dig_pll.clkr,
+ [LPASS_LPAAUDIO_DIG_PLL_OUT_ODD] = &lpass_lpaaudio_dig_pll_out_odd.clkr,
+};
+
+static struct gdsc lpass_pdc_hm_gdsc = {
+ .gdscr = 0x3090,
+ .pd = {
+ .name = "lpass_pdc_hm_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc lpass_audio_hm_gdsc = {
+ .gdscr = 0x9090,
+ .pd = {
+ .name = "lpass_audio_hm_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc lpass_core_hm_gdsc = {
+ .gdscr = 0x0,
+ .pd = {
+ .name = "lpass_core_hm_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *lpass_core_hm_sc7180_gdscs[] = {
+ [LPASS_CORE_HM_GDSCR] = &lpass_core_hm_gdsc,
+};
+
+static struct gdsc *lpass_audio_hm_sc7180_gdscs[] = {
+ [LPASS_PDC_HM_GDSCR] = &lpass_pdc_hm_gdsc,
+ [LPASS_AUDIO_HM_GDSCR] = &lpass_audio_hm_gdsc,
+};
+
+static struct regmap_config lpass_core_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc lpass_core_hm_sc7180_desc = {
+ .config = &lpass_core_cc_sc7180_regmap_config,
+ .gdscs = lpass_core_hm_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_core_hm_sc7180_gdscs),
+};
+
+static const struct qcom_cc_desc lpass_core_cc_sc7180_desc = {
+ .config = &lpass_core_cc_sc7180_regmap_config,
+ .clks = lpass_core_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(lpass_core_cc_sc7180_clocks),
+};
+
+static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
+ .config = &lpass_core_cc_sc7180_regmap_config,
+ .gdscs = lpass_audio_hm_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
+};
+
+static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ struct regmap *regmap;
+ int ret;
+
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_audio_cc";
+ desc = &lpass_audio_hm_sc7180_desc;
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+ return ret;
+
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc";
+ regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep the CLK always-ON
+ * LPASS_AUDIO_CORE_SYSNOC_SWAY_CORE_CLK
+ */
+ regmap_update_bits(regmap, 0x24000, BIT(0), BIT(0));
+
+ /* PLL settings */
+ regmap_write(regmap, 0x1008, 0x20);
+ regmap_update_bits(regmap, 0x1014, BIT(0), BIT(0));
+
+ clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
+ &lpass_lpaaudio_dig_pll_config);
+
+ return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+}
+
+static int lpass_hm_core_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
+ desc = &lpass_core_hm_sc7180_desc;
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
+ {
+ .compatible = "qcom,sc7180-lpasshm",
+ .data = lpass_hm_core_probe,
+ },
+ {
+ .compatible = "qcom,sc7180-lpasscorecc",
+ .data = lpass_core_cc_sc7180_probe,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7180_match_table);
+
+static int lpass_core_sc7180_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *p);
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe) {
+ ret = -EINVAL;
+ goto destroy_pm_clk;
+ }
+
+ ret = clk_probe(pdev);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops lpass_core_cc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_core_cc_sc7180_driver = {
+ .probe = lpass_core_sc7180_probe,
+ .driver = {
+ .name = "lpass_core_cc-sc7180",
+ .of_match_table = lpass_core_cc_sc7180_match_table,
+ .pm = &lpass_core_cc_pm_ops,
+ },
+};
+
+static int __init lpass_core_cc_sc7180_init(void)
+{
+ return platform_driver_register(&lpass_core_cc_sc7180_driver);
+}
+subsys_initcall(lpass_core_cc_sc7180_init);
+
+static void __exit lpass_core_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&lpass_core_cc_sc7180_driver);
+}
+module_exit(lpass_core_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 10560d963baf..4c6c9167ef50 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include "clk.h"
@@ -86,23 +87,14 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
{
struct regmap *grf = pll->ctx->grf;
unsigned int val;
- int delay = 24000000, ret;
-
- while (delay > 0) {
- ret = regmap_read(grf, pll->lock_offset, &val);
- if (ret) {
- pr_err("%s: failed to read pll lock status: %d\n",
- __func__, ret);
- return ret;
- }
+ int ret;
- if (val & BIT(pll->lock_shift))
- return 0;
- delay--;
- }
+ ret = regmap_read_poll_timeout(grf, pll->lock_offset, val,
+ val & BIT(pll->lock_shift), 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
- pr_err("%s: timeout waiting for pll to lock\n", __func__);
- return -ETIMEDOUT;
+ return ret;
}
/**
@@ -118,12 +110,31 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
#define RK3036_PLLCON1_REFDIV_SHIFT 0
#define RK3036_PLLCON1_POSTDIV2_MASK 0x7
#define RK3036_PLLCON1_POSTDIV2_SHIFT 6
+#define RK3036_PLLCON1_LOCK_STATUS BIT(10)
#define RK3036_PLLCON1_DSMPD_MASK 0x1
#define RK3036_PLLCON1_DSMPD_SHIFT 12
+#define RK3036_PLLCON1_PWRDOWN BIT(13)
#define RK3036_PLLCON2_FRAC_MASK 0xffffff
#define RK3036_PLLCON2_FRAC_SHIFT 0
-#define RK3036_PLLCON1_PWRDOWN (1 << 13)
+static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ u32 pllcon;
+ int ret;
+
+ /*
+ * Lock time typical 250, max 500 input clock cycles @24MHz
+ * So define a very safe maximum of 1000us, meaning 24000 cycles.
+ */
+ ret = readl_relaxed_poll_timeout(pll->reg_base + RK3036_PLLCON(1),
+ pllcon,
+ pllcon & RK3036_PLLCON1_LOCK_STATUS,
+ 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+ return ret;
+}
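For reference, the bound matches the comment's arithmetic: at 24 MHz the reference ticks 24 times per microsecond, so 1000 us corresponds to 24 * 1000 = 24000 input clock cycles, roughly 48 times the worst-case 500-cycle lock time.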
static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll,
struct rockchip_pll_rate_table *rate)
@@ -221,7 +232,7 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));
/* wait for the pll to lock */
- ret = rockchip_pll_wait_lock(pll);
+ ret = rockchip_rk3036_pll_wait_lock(pll);
if (ret) {
pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
__func__);
@@ -260,7 +271,7 @@ static int rockchip_rk3036_pll_enable(struct clk_hw *hw)
writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0),
pll->reg_base + RK3036_PLLCON(1));
- rockchip_pll_wait_lock(pll);
+ rockchip_rk3036_pll_wait_lock(pll);
return 0;
}
@@ -589,19 +600,20 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll)
{
u32 pllcon;
- int delay = 24000000;
-
- /* poll check the lock status in rk3399 xPLLCON2 */
- while (delay > 0) {
- pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
- if (pllcon & RK3399_PLLCON2_LOCK_STATUS)
- return 0;
+ int ret;
- delay--;
- }
+ /*
+ * Lock time typical 250, max 500 input clock cycles @24MHz
+ * So define a very safe maximum of 1000us, meaning 24000 cycles.
+ */
+ ret = readl_relaxed_poll_timeout(pll->reg_base + RK3399_PLLCON(2),
+ pllcon,
+ pllcon & RK3399_PLLCON2_LOCK_STATUS,
+ 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
- pr_err("%s: timeout waiting for pll to lock\n", __func__);
- return -ETIMEDOUT;
+ return ret;
}
static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll,
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 77aebfb1d6d5..730020fcc7fe 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -751,6 +751,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"pclk_peri",
"hclk_cpubus",
"hclk_vio_bus",
+ "sclk_mac_lbtest",
};
static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index cc2a177bbdbf..93c794695c46 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -15,6 +15,11 @@
#define RK3288_GRF_SOC_CON(x) (0x244 + x * 4)
#define RK3288_GRF_SOC_STATUS1 0x284
+enum rk3288_variant {
+ RK3288_CRU,
+ RK3288W_CRU,
+};
+
enum rk3288_plls {
apll, dpll, cpll, gpll, npll,
};
@@ -425,8 +430,6 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 0, GFLAGS),
- DIV(0, "hclk_vio", "aclk_vio0", 0,
- RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 2, GFLAGS),
@@ -819,6 +822,16 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS),
};
+static struct rockchip_clk_branch rk3288w_hclkvio_branch[] __initdata = {
+ DIV(0, "hclk_vio", "aclk_vio1", 0,
+ RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+};
+
+static struct rockchip_clk_branch rk3288_hclkvio_branch[] __initdata = {
+ DIV(0, "hclk_vio", "aclk_vio0", 0,
+ RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+};
+
static const char *const rk3288_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
@@ -914,7 +927,8 @@ static struct syscore_ops rk3288_clk_syscore_ops = {
.resume = rk3288_clk_resume,
};
-static void __init rk3288_clk_init(struct device_node *np)
+static void __init rk3288_common_init(struct device_node *np,
+ enum rk3288_variant soc)
{
struct rockchip_clk_provider *ctx;
@@ -936,6 +950,14 @@ static void __init rk3288_clk_init(struct device_node *np)
RK3288_GRF_SOC_STATUS1);
rockchip_clk_register_branches(ctx, rk3288_clk_branches,
ARRAY_SIZE(rk3288_clk_branches));
+
+ if (soc == RK3288W_CRU)
+ rockchip_clk_register_branches(ctx, rk3288w_hclkvio_branch,
+ ARRAY_SIZE(rk3288w_hclkvio_branch));
+ else
+ rockchip_clk_register_branches(ctx, rk3288_hclkvio_branch,
+ ARRAY_SIZE(rk3288_hclkvio_branch));
+
rockchip_clk_protect_critical(rk3288_critical_clocks,
ARRAY_SIZE(rk3288_critical_clocks));
@@ -954,4 +976,15 @@ static void __init rk3288_clk_init(struct device_node *np)
rockchip_clk_of_add_provider(np, ctx);
}
+
+static void __init rk3288_clk_init(struct device_node *np)
+{
+ rk3288_common_init(np, RK3288_CRU);
+}
CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
+
+static void __init rk3288w_clk_init(struct device_node *np)
+{
+ rk3288_common_init(np, RK3288W_CRU);
+}
+CLK_OF_DECLARE(rk3288w_cru, "rockchip,rk3288w-cru", rk3288w_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index c186a1985bf4..2429b7c2a8b3 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -808,22 +808,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
RK3328_SDMMC_CON0, 1),
MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
- RK3328_SDMMC_CON1, 0),
+ RK3328_SDMMC_CON1, 1),
MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
RK3328_SDIO_CON0, 1),
MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
- RK3328_SDIO_CON1, 0),
+ RK3328_SDIO_CON1, 1),
MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
RK3328_EMMC_CON0, 1),
MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
- RK3328_EMMC_CON1, 0),
+ RK3328_EMMC_CON1, 1),
MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON0, 1),
MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
- RK3328_SDMMC_EXT_CON1, 0),
+ RK3328_SDMMC_EXT_CON1, 1),
};
static const char *const rk3328_critical_clocks[] __initconst = {
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index c84d5bab7ac2..b95483bb6a5e 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
for (i = pll1; i < maxclk; i++) {
atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
- BUG_ON(!atlas6_clks[i]);
+ BUG_ON(IS_ERR(atlas6_clks[i]));
}
clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
clk_register_clkdev(atlas6_clks[io], NULL, "io");
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 0b212cf2e794..f180c055d33f 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -327,16 +327,26 @@ int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll)
return clk_pll_wait_for_lock(pll);
}
+static bool pllm_clk_is_gated_by_pmc(struct tegra_clk_pll *pll)
+{
+ u32 val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+
+ return (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) &&
+ !(val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE);
+}
+
static int clk_pll_is_enabled(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
u32 val;
- if (pll->params->flags & TEGRA_PLLM) {
- val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
- if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
- return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
- }
+ /*
+ * Power Management Controller (PMC) can override the PLLM clock
+ * settings, including the enable-state. The PLLM is enabled when
+ * PLLM's CaR state is ON and when PLLM isn't gated by PMC.
+ */
+ if ((pll->params->flags & TEGRA_PLLM) && pllm_clk_is_gated_by_pmc(pll))
+ return 0;
val = pll_readl_base(pll);
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 7c774ea7ddeb..18564efdc651 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
-obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-st.o
+obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-fch.o
clk-x86-lpss-objs := clk-lpt.o
obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
index c03cc6b85b9f..3179557b5f78 100644
--- a/drivers/clk/x86/clk-cgu-pll.c
+++ b/drivers/clk/x86/clk-cgu-pll.c
@@ -128,7 +128,7 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
pll->hw.init = &init;
hw = &pll->hw;
- ret = clk_hw_register(dev, hw);
+ ret = devm_clk_hw_register(dev, hw);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 56af0e04ec1e..33de600e0c38 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -119,7 +119,7 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
mux->hw.init = &init;
hw = &mux->hw;
- ret = clk_hw_register(dev, hw);
+ ret = devm_clk_hw_register(dev, hw);
if (ret)
return ERR_PTR(ret);
@@ -247,7 +247,7 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
div->hw.init = &init;
hw = &div->hw;
- ret = clk_hw_register(dev, hw);
+ ret = devm_clk_hw_register(dev, hw);
if (ret)
return ERR_PTR(ret);
@@ -361,7 +361,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
gate->hw.init = &init;
hw = &gate->hw;
- ret = clk_hw_register(dev, hw);
+ ret = devm_clk_hw_register(dev, hw);
if (ret)
return ERR_PTR(ret);
@@ -420,18 +420,14 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
unsigned int div0, div1, exdiv;
- unsigned long flags;
u64 prate;
- spin_lock_irqsave(&ddiv->lock, flags);
div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
ddiv->shift0, ddiv->width0) + 1;
div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
ddiv->shift1, ddiv->width1) + 1;
exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
ddiv->shift2, ddiv->width2);
- spin_unlock_irqrestore(&ddiv->lock, flags);
-
prate = (u64)parent_rate;
do_div(prate, div0);
do_div(prate, div1);
@@ -548,24 +544,21 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
div = div * 2;
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
+ spin_unlock_irqrestore(&ddiv->lock, flags);
- if (div <= 0) {
- spin_unlock_irqrestore(&ddiv->lock, flags);
+ if (div <= 0)
return *prate;
- }
- if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
- if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
- spin_unlock_irqrestore(&ddiv->lock, flags);
+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
+ if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
return -EINVAL;
- }
- }
rate64 = *prate;
do_div(rate64, ddiv1);
do_div(rate64, ddiv2);
/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+ spin_lock_irqsave(&ddiv->lock, flags);
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
rate64 = rate64 * 2;
rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
@@ -588,19 +581,18 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
unsigned int nr_clk)
{
struct device *dev = ctx->dev;
- struct clk_init_data init = {};
- struct lgm_clk_ddiv *ddiv;
struct clk_hw *hw;
unsigned int idx;
int ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- ddiv = NULL;
+ struct clk_init_data init = {};
+ struct lgm_clk_ddiv *ddiv;
+
ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
if (!ddiv)
return -ENOMEM;
- memset(&init, 0, sizeof(init));
init.name = list->name;
init.ops = &lgm_clk_ddiv_ops;
init.flags = list->flags;
@@ -624,7 +616,7 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
ddiv->hw.init = &init;
hw = &ddiv->hw;
- ret = clk_hw_register(dev, hw);
+ ret = devm_clk_hw_register(dev, hw);
if (ret) {
dev_err(dev, "register clk: %s failed!\n", list->name);
return ret;
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
new file mode 100644
index 000000000000..8f7c5142b0f0
--- /dev/null
+++ b/drivers/clk/x86/clk-fch.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * clock framework for AMD Stoney and Raven FCH based clocks
+ *
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_data/clk-fch.h>
+#include <linux/platform_device.h>
+
+/* Clock Driving Strength 2 register */
+#define CLKDRVSTR2 0x28
+/* Clock Control 1 register */
+#define MISCCLKCNTL1 0x40
+/* Auxiliary clock1 enable bit */
+#define OSCCLKENB 2
+/* 25Mhz auxiliary output clock freq bit */
+#define OSCOUT1CLK25MHZ 16
+
+#define ST_CLK_48M 0
+#define ST_CLK_25M 1
+#define ST_CLK_MUX 2
+#define ST_CLK_GATE 3
+#define ST_MAX_CLKS 4
+
+#define RV_CLK_48M 0
+#define RV_CLK_GATE 1
+#define RV_MAX_CLKS 2
+
+static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
+static struct clk_hw *hws[ST_MAX_CLKS];
+
+static int fch_clk_probe(struct platform_device *pdev)
+{
+ struct fch_clk_data *fch_data;
+
+ fch_data = dev_get_platdata(&pdev->dev);
+ if (!fch_data || !fch_data->base)
+ return -EINVAL;
+
+ if (!fch_data->is_rv) {
+ hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+ NULL, 0, 48000000);
+ hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz",
+ NULL, 0, 25000000);
+
+ hws[ST_CLK_MUX] = clk_hw_register_mux(NULL, "oscout1_mux",
+ clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
+ 0, fch_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0,
+ NULL);
+
+ clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
+
+ hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+ "oscout1_mux", 0, fch_data->base + MISCCLKCNTL1,
+ OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE],
+ "oscout1", NULL);
+ } else {
+ hws[RV_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+ NULL, 0, 48000000);
+
+ hws[RV_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+ "clk48MHz", 0, fch_data->base + MISCCLKCNTL1,
+ OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[RV_CLK_GATE],
+ "oscout1", NULL);
+ }
+
+ return 0;
+}
+
+static int fch_clk_remove(struct platform_device *pdev)
+{
+ int i, clks;
+ struct fch_clk_data *fch_data;
+
+ fch_data = dev_get_platdata(&pdev->dev);
+
+ clks = fch_data->is_rv ? RV_MAX_CLKS : ST_MAX_CLKS;
+
+ for (i = 0; i < clks; i++)
+ clk_hw_unregister(hws[i]);
+
+ return 0;
+}
+
+static struct platform_driver fch_clk_driver = {
+ .driver = {
+ .name = "clk-fch",
+ .suppress_bind_attrs = true,
+ },
+ .probe = fch_clk_probe,
+ .remove = fch_clk_remove,
+};
+builtin_platform_driver(fch_clk_driver);
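
The renamed driver still exposes its gate clock through a clkdev lookup with the connection id "oscout1", so consumers keep using the generic clk API. A hedged sketch of such a consumer, assuming a hypothetical device that needs the auxiliary clock (not part of this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_oscout1(struct device *dev)
{
	struct clk *clk;
	int ret;

	/* matches the "oscout1" clkdev lookup registered by fch_clk_probe() */
	clk = devm_clk_get(dev, "oscout1");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ungates the OSCCLKENB bit in MISCCLKCNTL1 */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return 0;
}
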
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
deleted file mode 100644
index 25d4b97aff9b..000000000000
--- a/drivers/clk/x86/clk-st.c
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * clock framework for AMD Stoney based clocks
- *
- * Copyright 2018 Advanced Micro Devices, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/platform_data/clk-st.h>
-#include <linux/platform_device.h>
-
-/* Clock Driving Strength 2 register */
-#define CLKDRVSTR2 0x28
-/* Clock Control 1 register */
-#define MISCCLKCNTL1 0x40
-/* Auxiliary clock1 enable bit */
-#define OSCCLKENB 2
-/* 25Mhz auxiliary output clock freq bit */
-#define OSCOUT1CLK25MHZ 16
-
-#define ST_CLK_48M 0
-#define ST_CLK_25M 1
-#define ST_CLK_MUX 2
-#define ST_CLK_GATE 3
-#define ST_MAX_CLKS 4
-
-static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
-static struct clk_hw *hws[ST_MAX_CLKS];
-
-static int st_clk_probe(struct platform_device *pdev)
-{
- struct st_clk_data *st_data;
-
- st_data = dev_get_platdata(&pdev->dev);
- if (!st_data || !st_data->base)
- return -EINVAL;
-
- hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz", NULL, 0,
- 48000000);
- hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz", NULL, 0,
- 25000000);
-
- hws[ST_CLK_MUX] = clk_hw_register_mux(NULL, "oscout1_mux",
- clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
- 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
-
- clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
-
- hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
- 0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
- CLK_GATE_SET_TO_DISABLE, NULL);
-
- devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
- NULL);
-
- return 0;
-}
-
-static int st_clk_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ST_MAX_CLKS; i++)
- clk_hw_unregister(hws[i]);
- return 0;
-}
-
-static struct platform_driver st_clk_driver = {
- .driver = {
- .name = "clk-st",
- .suppress_bind_attrs = true,
- },
- .probe = st_clk_probe,
- .remove = st_clk_remove,
-};
-builtin_platform_driver(st_clk_driver);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 2ed8b4361d95..68b087bff59c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -291,6 +291,10 @@ config CLKSRC_STM32
select CLKSRC_MMIO
select TIMER_OF
+config CLKSRC_STM32_LP
+ bool "Low power clocksource for STM32 SoCs"
+ depends on MFD_STM32_LPTIMER || COMPILE_TEST
+
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK
@@ -649,9 +653,8 @@ config ATCPIT100_TIMER
This option enables support for the Andestech ATCPIT100 timers.
config RISCV_TIMER
- bool "Timer for the RISC-V platform"
+ bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV
- default y
select TIMER_PROBE
select TIMER_OF
help
@@ -659,6 +662,15 @@ config RISCV_TIMER
is accessed via both the SBI and the rdcycle instruction. This is
required for all RISC-V systems.
+config CLINT_TIMER
+ bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK && RISCV
+ select TIMER_PROBE
+ select TIMER_OF
+ help
+ This option enables the CLINT timer for RISC-V systems. The CLINT
+ driver is usually used for NoMMU RISC-V systems.
+
config CSKY_MP_TIMER
bool "SMP Timer for the C-SKY platform" if COMPILE_TEST
depends on CSKY
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 3994e221e262..1c444cc3bb44 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
+obj-$(CONFIG_CLKSRC_STM32_LP) += timer-stm32-lp.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
+obj-$(CONFIG_CLINT_TIMER) += timer-clint.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 38858e141731..80e960602030 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -309,7 +309,7 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
/* restore original register value */
writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
- /* fall through */
+ fallthrough;
default:
return NOTIFY_DONE;
}
@@ -392,7 +392,7 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
- /* fall through */
+ fallthrough;
case PRE_RATE_CHANGE:
case ABORT_RATE_CHANGE:
default:
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
new file mode 100644
index 000000000000..8eeafa82c03d
--- /dev/null
+++ b/drivers/clocksource/timer-clint.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO
+ * timer device.
+ */
+
+#define pr_fmt(fmt) "clint: " fmt
+#include <linux/bitops.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/smp.h>
+
+#define CLINT_IPI_OFF 0
+#define CLINT_TIMER_CMP_OFF 0x4000
+#define CLINT_TIMER_VAL_OFF 0xbff8
+
+/* CLINT manages IPI and Timer for RISC-V M-mode */
+static u32 __iomem *clint_ipi_base;
+static u64 __iomem *clint_timer_cmp;
+static u64 __iomem *clint_timer_val;
+static unsigned long clint_timer_freq;
+static unsigned int clint_timer_irq;
+
+static void clint_send_ipi(const struct cpumask *target)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, target)
+ writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+}
+
+static void clint_clear_ipi(void)
+{
+ writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
+}
+
+static struct riscv_ipi_ops clint_ipi_ops = {
+ .ipi_inject = clint_send_ipi,
+ .ipi_clear = clint_clear_ipi,
+};
+
+#ifdef CONFIG_64BIT
+#define clint_get_cycles() readq_relaxed(clint_timer_val)
+#else
+#define clint_get_cycles() readl_relaxed(clint_timer_val)
+#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1)
+#endif
+
+#ifdef CONFIG_64BIT
+static u64 notrace clint_get_cycles64(void)
+{
+ return clint_get_cycles();
+}
+#else /* CONFIG_64BIT */
+static u64 notrace clint_get_cycles64(void)
+{
+ u32 hi, lo;
+
+ do {
+ hi = clint_get_cycles_hi();
+ lo = clint_get_cycles();
+ } while (hi != clint_get_cycles_hi());
+
+ return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+static u64 clint_rdtime(struct clocksource *cs)
+{
+ return clint_get_cycles64();
+}
+
+static struct clocksource clint_clocksource = {
+ .name = "clint_clocksource",
+ .rating = 300,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = clint_rdtime,
+};
+
+static int clint_clock_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ void __iomem *r = clint_timer_cmp +
+ cpuid_to_hartid_map(smp_processor_id());
+
+ csr_set(CSR_IE, IE_TIE);
+ writeq_relaxed(clint_get_cycles64() + delta, r);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
+ .name = "clint_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .set_next_event = clint_clock_next_event,
+};
+
+static int clint_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
+
+ ce->cpumask = cpumask_of(cpu);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+
+ enable_percpu_irq(clint_timer_irq,
+ irq_get_trigger_type(clint_timer_irq));
+ return 0;
+}
+
+static int clint_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(clint_timer_irq);
+ return 0;
+}
+
+static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
+
+ csr_clear(CSR_IE, IE_TIE);
+ evdev->event_handler(evdev);
+
+ return IRQ_HANDLED;
+}
+
+static int __init clint_timer_init_dt(struct device_node *np)
+{
+ int rc;
+ u32 i, nr_irqs;
+ void __iomem *base;
+ struct of_phandle_args oirq;
+
+ /*
+ * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
+ * RV_IRQ_SOFT. If it's anything else then we ignore the device.
+ */
+ nr_irqs = of_irq_count(np);
+ for (i = 0; i < nr_irqs; i++) {
+ if (of_irq_parse_one(np, i, &oirq)) {
+ pr_err("%pOFP: failed to parse irq %d.\n", np, i);
+ continue;
+ }
+
+ if ((oirq.args_count != 1) ||
+ (oirq.args[0] != RV_IRQ_TIMER &&
+ oirq.args[0] != RV_IRQ_SOFT)) {
+ pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
+ np, i, oirq.args[0]);
+ return -ENODEV;
+ }
+
+ /* Find parent irq domain and map timer irq */
+ if (!clint_timer_irq &&
+ oirq.args[0] == RV_IRQ_TIMER &&
+ irq_find_host(oirq.np))
+ clint_timer_irq = irq_of_parse_and_map(np, i);
+ }
+
+ /* If CLINT timer irq not found then fail */
+ if (!clint_timer_irq) {
+ pr_err("%pOFP: timer irq not found\n", np);
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%pOFP: could not map registers\n", np);
+ return -ENODEV;
+ }
+
+ clint_ipi_base = base + CLINT_IPI_OFF;
+ clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
+ clint_timer_val = base + CLINT_TIMER_VAL_OFF;
+ clint_timer_freq = riscv_timebase;
+
+ pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
+
+ rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
+ if (rc) {
+ pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
+ goto fail_iounmap;
+ }
+
+ sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
+
+ rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
+ "clint-timer", &clint_clock_event);
+ if (rc) {
+ pr_err("registering percpu irq failed [%d]\n", rc);
+ goto fail_iounmap;
+ }
+
+ rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
+ "clockevents/clint/timer:starting",
+ clint_timer_starting_cpu,
+ clint_timer_dying_cpu);
+ if (rc) {
+ pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
+ goto fail_free_irq;
+ }
+
+ riscv_set_ipi_ops(&clint_ipi_ops);
+ clint_clear_ipi();
+
+ return 0;
+
+fail_free_irq:
+ free_irq(clint_timer_irq, &clint_clock_event);
+fail_iounmap:
+ iounmap(base);
+ return rc;
+}
+
+TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
+TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
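
On 32-bit RISC-V the 64-bit CLINT time value cannot be read atomically, which is why clint_get_cycles64() reads the high word, then the low word, and retries if the high word changed in between. A standalone sketch of that pattern for a generic 64-bit MMIO counter (the split register layout here is hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/* Read a 64-bit MMIO counter as two 32-bit halves on a 32-bit CPU. */
static u64 example_read_counter64(void __iomem *lo, void __iomem *hi)
{
	u32 h, l;

	do {
		h = readl_relaxed(hi);
		l = readl_relaxed(lo);
		/* retry if the low word wrapped and carried into the high word */
	} while (h != readl_relaxed(hi));

	return ((u64)h << 32) | l;
}
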
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 9de1dabfb126..c51c5ed15aa7 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -19,26 +19,13 @@
#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
-
-u64 __iomem *riscv_time_cmp;
-u64 __iomem *riscv_time_val;
-
-static inline void mmio_set_timer(u64 val)
-{
- void __iomem *r;
-
- r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
- writeq_relaxed(val, r);
-}
+#include <asm/timex.h>
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
csr_set(CSR_IE, IE_TIE);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_set_timer(get_cycles64() + delta);
- else
- mmio_set_timer(get_cycles64() + delta);
+ sbi_set_timer(get_cycles64() + delta);
return 0;
}
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
new file mode 100644
index 000000000000..db2841d0beb8
--- /dev/null
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define CFGR_PSC_OFFSET 9
+#define STM32_LP_RATING 1000
+#define STM32_TARGET_CLKRATE (32000 * HZ)
+#define STM32_LP_MAX_PSC 7
+
+struct stm32_lp_private {
+ struct regmap *reg;
+ struct clock_event_device clkevt;
+ unsigned long period;
+ struct device *dev;
+};
+
+static struct stm32_lp_private*
+to_priv(struct clock_event_device *clkevt)
+{
+ return container_of(clkevt, struct stm32_lp_private, clkevt);
+}
+
+static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ regmap_write(priv->reg, STM32_LPTIM_IER, 0);
+ /* clear pending flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
+
+ return 0;
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ /* disable LPTIMER to be able to write into IER register */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+ /* enable LPTIMER to be able to write into ARR register */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+
+ /* start counter */
+ if (is_periodic)
+ regmap_write(priv->reg, STM32_LPTIM_CR,
+ STM32_LPTIM_CNTSTRT | STM32_LPTIM_ENABLE);
+ else
+ regmap_write(priv->reg, STM32_LPTIM_CR,
+ STM32_LPTIM_SNGSTRT | STM32_LPTIM_ENABLE);
+
+ return 0;
+}
+
+static int stm32_clkevent_lp_set_next_event(unsigned long evt,
+ struct clock_event_device *clkevt)
+{
+ return stm32_clkevent_lp_set_timer(evt, clkevt,
+ clockevent_state_periodic(clkevt));
+}
+
+static int stm32_clkevent_lp_set_periodic(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ return stm32_clkevent_lp_set_timer(priv->period, clkevt, true);
+}
+
+static int stm32_clkevent_lp_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ return stm32_clkevent_lp_set_timer(priv->period, clkevt, false);
+}
+
+static irqreturn_t stm32_clkevent_lp_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
+
+ if (clkevt->event_handler)
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
+ unsigned long *rate)
+{
+ int i;
+
+ for (i = 0; i <= STM32_LP_MAX_PSC; i++) {
+ if (DIV_ROUND_CLOSEST(*rate, 1 << i) < STM32_TARGET_CLKRATE)
+ break;
+ }
+
+ regmap_write(priv->reg, STM32_LPTIM_CFGR, i << CFGR_PSC_OFFSET);
+
+ /* Adjust rate and period given the prescaler value */
+ *rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
+ priv->period = DIV_ROUND_UP(*rate, HZ);
+}
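
The prescaler loop picks the smallest power-of-two divider that brings the timer clock below STM32_TARGET_CLKRATE (32000 * HZ). As a worked example, assuming HZ=100 and a hypothetical 9MHz LPTIM input clock: the target is 3.2MHz, 9MHz and 4.5MHz are still too fast, 9MHz/4 = 2.25MHz is not, so the prescaler index is 2 and the periodic reload value becomes DIV_ROUND_UP(2250000, 100) = 22500. The same arithmetic as a standalone snippet (plain integer division stands in for DIV_ROUND_CLOSEST, which gives the same result for these numbers):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 9000000;		/* hypothetical LPTIM input clock */
	unsigned long hz = 100;			/* hypothetical CONFIG_HZ */
	unsigned long target = 32000 * hz;	/* STM32_TARGET_CLKRATE */
	unsigned long period;
	int i;

	for (i = 0; i <= 7; i++)		/* STM32_LP_MAX_PSC */
		if (rate / (1UL << i) < target)
			break;

	rate /= 1UL << i;
	period = (rate + hz - 1) / hz;		/* DIV_ROUND_UP(rate, HZ) */
	printf("psc=%d rate=%lu period=%lu\n", i, rate, period);
	return 0;
}
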
+
+static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
+ struct device_node *np, unsigned long rate)
+{
+ priv->clkevt.name = np->full_name;
+ priv->clkevt.cpumask = cpu_possible_mask;
+ priv->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ priv->clkevt.set_state_shutdown = stm32_clkevent_lp_shutdown;
+ priv->clkevt.set_state_periodic = stm32_clkevent_lp_set_periodic;
+ priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
+ priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
+ priv->clkevt.rating = STM32_LP_RATING;
+
+ clockevents_config_and_register(&priv->clkevt, rate, 0x1,
+ STM32_LPTIM_MAX_ARR);
+}
+
+static int stm32_clkevent_lp_probe(struct platform_device *pdev)
+{
+ struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_lp_private *priv;
+ unsigned long rate;
+ int ret, irq;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg = ddata->regmap;
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret)
+ return -EINVAL;
+
+ rate = clk_get_rate(ddata->clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_clk_disable;
+ }
+
+ irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+ if (irq <= 0) {
+ ret = irq;
+ goto out_clk_disable;
+ }
+
+ if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
+ ret = device_init_wakeup(&pdev->dev, true);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ goto out_clk_disable;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, stm32_clkevent_lp_irq_handler,
+ IRQF_TIMER, pdev->name, &priv->clkevt);
+ if (ret)
+ goto out_clk_disable;
+
+ stm32_clkevent_lp_set_prescaler(priv, &rate);
+
+ stm32_clkevent_lp_init(priv, pdev->dev.parent->of_node, rate);
+
+ priv->dev = &pdev->dev;
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(ddata->clk);
+ return ret;
+}
+
+static int stm32_clkevent_lp_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent */
+}
+
+static const struct of_device_id stm32_clkevent_lp_of_match[] = {
+ { .compatible = "st,stm32-lptimer-timer", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_clkevent_lp_of_match);
+
+static struct platform_driver stm32_clkevent_lp_driver = {
+ .probe = stm32_clkevent_lp_probe,
+ .remove = stm32_clkevent_lp_remove,
+ .driver = {
+ .name = "stm32-lptimer-timer",
+ .of_match_table = of_match_ptr(stm32_clkevent_lp_of_match),
+ },
+};
+module_platform_driver(stm32_clkevent_lp_driver);
+
+MODULE_ALIAS("platform:stm32-lptimer-timer");
+MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index afad06b91c77..47aa90f9a7c2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -73,8 +73,6 @@ static inline bool has_target(void)
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
-static int cpufreq_start_governor(struct cpufreq_policy *policy);
-static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
@@ -705,8 +703,7 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
ret = sprintf(buf, "%u\n", freq);
- else if (cpufreq_driver && cpufreq_driver->setpolicy &&
- cpufreq_driver->get)
+ else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
ret = sprintf(buf, "%u\n", policy->cur);
@@ -2266,7 +2263,7 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy)
module_put(policy->governor->owner);
}
-static int cpufreq_start_governor(struct cpufreq_policy *policy)
+int cpufreq_start_governor(struct cpufreq_policy *policy)
{
int ret;
@@ -2293,7 +2290,7 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy)
return 0;
}
-static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
if (cpufreq_suspended || !policy->governor)
return;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fc459c9c00ff..e0220a6fbc69 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -36,6 +36,7 @@
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000
#define INTEL_CPUFREQ_TRANSITION_DELAY 500
#ifdef CONFIG_ACPI
@@ -220,6 +221,7 @@ struct global_params {
* preference/bias
* @epp_saved: Saved EPP/EPB during system suspend or CPU offline
* operation
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
@@ -257,6 +259,7 @@ struct cpudata {
s16 epp_policy;
s16 epp_default;
s16 epp_saved;
+ s16 epp_cached;
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
@@ -639,6 +642,26 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
return index;
}
+static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
+{
+ /*
+ * Use the cached HWP Request MSR value, because in the active mode the
+ * register itself may be updated by intel_pstate_hwp_boost_up() or
+ * intel_pstate_hwp_boost_down() at any time.
+ */
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ /*
+ * The only other updater of hwp_req_cached in the active mode,
+ * intel_pstate_hwp_set(), is called under the same lock as this
+ * function, so it cannot run in parallel with the update below.
+ */
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
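
The energy/performance preference lives in bits 31:24 of the HWP_REQUEST MSR, so the helper clears that field with GENMASK_ULL(31, 24) and ORs in the new EPP shifted left by 24 before writing the MSR back. The field update in isolation, on a plain 64-bit value (the sample numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define HWP_EPP_MASK	(((uint64_t)0xff) << 24)	/* GENMASK_ULL(31, 24) */

int main(void)
{
	uint64_t hwp_req = 0x80002704ULL;	/* arbitrary cached MSR value */
	uint32_t epp = 0x20;			/* new energy-perf preference */

	hwp_req &= ~HWP_EPP_MASK;
	hwp_req |= (uint64_t)epp << 24;

	printf("0x%llx\n", (unsigned long long)hwp_req);	/* prints 0x20002704 */
	return 0;
}
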
+
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
int pref_index, bool use_raw,
u32 raw_epp)
@@ -650,28 +673,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
epp = cpu_data->epp_default;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
- /*
- * Use the cached HWP Request MSR value, because the register
- * itself may be updated by intel_pstate_hwp_boost_up() or
- * intel_pstate_hwp_boost_down() at any time.
- */
- u64 value = READ_ONCE(cpu_data->hwp_req_cached);
-
- value &= ~GENMASK_ULL(31, 24);
-
if (use_raw)
epp = raw_epp;
else if (epp == -EINVAL)
epp = epp_values[pref_index - 1];
- value |= (u64)epp << 24;
- /*
- * The only other updater of hwp_req_cached in the active mode,
- * intel_pstate_hwp_set(), is called under the same lock as this
- * function, so it cannot run in parallel with the update below.
- */
- WRITE_ONCE(cpu_data->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+ ret = intel_pstate_set_epp(cpu_data, epp);
} else {
if (epp == -EINVAL)
epp = (pref_index - 1) << 2;
@@ -697,10 +704,12 @@ static ssize_t show_energy_performance_available_preferences(
cpufreq_freq_attr_ro(energy_performance_available_preferences);
+static struct cpufreq_driver intel_pstate;
+
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
- struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
char str_preference[21];
bool raw = false;
ssize_t ret;
@@ -725,15 +734,44 @@ static ssize_t store_energy_performance_preference(
raw = true;
}
+ /*
+ * This function runs with the policy R/W semaphore held, which
+ * guarantees that the driver pointer will not change while it is
+ * running.
+ */
+ if (!intel_pstate_driver)
+ return -EAGAIN;
+
mutex_lock(&intel_pstate_limits_lock);
- ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp);
- if (!ret)
- ret = count;
+ if (intel_pstate_driver == &intel_pstate) {
+ ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
+ } else {
+ /*
+ * In the passive mode the governor needs to be stopped on the
+ * target CPU before the EPP update and restarted after it,
+ * which is super-heavy-weight, so make sure it is worth doing
+ * upfront.
+ */
+ if (!raw)
+ epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+
+ if (cpu->epp_cached != epp) {
+ int err;
+
+ cpufreq_stop_governor(policy);
+ ret = intel_pstate_set_epp(cpu, epp);
+ err = cpufreq_start_governor(policy);
+ if (!ret) {
+ cpu->epp_cached = epp;
+ ret = err;
+ }
+ }
+ }
mutex_unlock(&intel_pstate_limits_lock);
- return ret;
+ return ret ?: count;
}
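
From userspace the EPP update remains a write to the per-policy cpufreq attribute; the governor stop/restart dance above happens entirely inside the kernel. A small hedged example writing one of the documented preference strings for CPU0:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* documented values include default, performance, balance_performance,
	 * balance_power and power */
	fputs("balance_performance\n", f);
	fclose(f);
	return 0;
}
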
static ssize_t show_energy_performance_preference(
@@ -1145,8 +1183,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
return count;
}
-static struct cpufreq_driver intel_pstate;
-
static void update_qos_request(enum freq_qos_req_type type)
{
int max_state, turbo_max, freq, i, perf_pct;
@@ -1330,9 +1366,10 @@ static const struct attribute_group intel_pstate_attr_group = {
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
+static struct kobject *intel_pstate_kobject;
+
static void __init intel_pstate_sysfs_expose_params(void)
{
- struct kobject *intel_pstate_kobject;
int rc;
intel_pstate_kobject = kobject_create_and_add("intel_pstate",
@@ -1357,17 +1394,31 @@ static void __init intel_pstate_sysfs_expose_params(void)
rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
WARN_ON(rc);
- if (hwp_active) {
- rc = sysfs_create_file(intel_pstate_kobject,
- &hwp_dynamic_boost.attr);
- WARN_ON(rc);
- }
-
if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
WARN_ON(rc);
}
}
+
+static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
+{
+ int rc;
+
+ if (!hwp_active)
+ return;
+
+ rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+ WARN_ON_ONCE(rc);
+}
+
+static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
+{
+ if (!hwp_active)
+ return;
+
+ sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+}
+
/************************** sysfs end ************************/
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
@@ -2247,7 +2298,10 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
- intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+ if (hwp_active)
+ intel_pstate_hwp_force_min_perf(policy->cpu);
+ else
+ intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
@@ -2255,12 +2309,10 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
intel_pstate_clear_update_util_hook(policy->cpu);
- if (hwp_active) {
+ if (hwp_active)
intel_pstate_hwp_save_state(policy);
- intel_pstate_hwp_force_min_perf(policy->cpu);
- } else {
- intel_cpufreq_stop_cpu(policy);
- }
+
+ intel_cpufreq_stop_cpu(policy);
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
@@ -2390,13 +2442,71 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
fp_toint(cpu->iowait_boost * 100));
}
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+ bool fast_switch)
+{
+ u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(target_pstate);
+
+ /*
+ * The entire MSR needs to be updated in order to update the HWP min
+ * field in it, so opportunistically update the max too if needed.
+ */
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+
+ if (value == prev)
+ return;
+
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ if (fast_switch)
+ wrmsrl(MSR_HWP_REQUEST, value);
+ else
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
+static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+ u32 target_pstate, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+ else
+ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+}
+
+static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+ bool fast_switch)
+{
+ int old_pstate = cpu->pstate.current_pstate;
+
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ if (target_pstate != old_pstate) {
+ cpu->pstate.current_pstate = target_pstate;
+ if (hwp_active)
+ intel_cpufreq_adjust_hwp(cpu, target_pstate,
+ fast_switch);
+ else
+ intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
+ fast_switch);
+ }
+
+ intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
+ INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
+ return target_pstate;
+}
+
static int intel_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
struct cpufreq_freqs freqs;
- int target_pstate, old_pstate;
+ int target_pstate;
update_turbo_state();
@@ -2404,6 +2514,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
+
switch (relation) {
case CPUFREQ_RELATION_L:
target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
@@ -2415,15 +2526,11 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
break;
}
- target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- old_pstate = cpu->pstate.current_pstate;
- if (target_pstate != cpu->pstate.current_pstate) {
- cpu->pstate.current_pstate = target_pstate;
- wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
- pstate_funcs.get_val(cpu, target_pstate));
- }
+
+ target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+
freqs.new = target_pstate * cpu->pstate.scaling;
- intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -2433,15 +2540,14 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- int target_pstate, old_pstate;
+ int target_pstate;
update_turbo_state();
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
- target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- old_pstate = cpu->pstate.current_pstate;
- intel_pstate_update_pstate(cpu, target_pstate);
- intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+
+ target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+
return target_pstate * cpu->pstate.scaling;
}
@@ -2461,7 +2567,6 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
- policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
/* This reflects the intel_pstate_get_cpu_pstates() setting. */
policy->cur = policy->cpuinfo.min_freq;
@@ -2473,10 +2578,18 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- if (hwp_active)
+ if (hwp_active) {
+ u64 value;
+
intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
- else
+ policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24;
+ } else {
turbo_max = cpu->pstate.turbo_pstate;
+ policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
+ }
min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
min_freq *= cpu->pstate.scaling;
@@ -2553,6 +2666,10 @@ static void intel_pstate_driver_cleanup(void)
}
}
put_online_cpus();
+
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_sysfs_hide_hwp_dynamic_boost();
+
intel_pstate_driver = NULL;
}
@@ -2560,6 +2677,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
int ret;
+ if (driver == &intel_pstate)
+ intel_pstate_sysfs_expose_hwp_dynamic_boost();
+
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
@@ -2577,9 +2697,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
static int intel_pstate_unregister_driver(void)
{
- if (hwp_active)
- return -EBUSY;
-
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
@@ -2835,7 +2952,10 @@ static int __init intel_pstate_init(void)
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
- default_driver = &intel_pstate;
+ intel_cpufreq.attr = hwp_cpufreq_attrs;
+ if (!default_driver)
+ default_driver = &intel_pstate;
+
goto hwp_cpu_matched;
}
} else {
@@ -2906,14 +3026,13 @@ static int __init intel_pstate_setup(char *str)
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable")) {
+ if (!strcmp(str, "disable"))
no_load = 1;
- } else if (!strcmp(str, "active")) {
+ else if (!strcmp(str, "active"))
default_driver = &intel_pstate;
- } else if (!strcmp(str, "passive")) {
+ else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;
- no_hwp = 1;
- }
+
if (!strcmp(str, "no_hwp")) {
pr_info("HWP disabled\n");
no_hwp = 1;
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index bb61677c11c7..ef0a3216a386 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -129,7 +129,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
case 0x0D: /* Pentium M (Dothan) */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
- /* fall through */
+ fallthrough;
case 0x09: /* Pentium M (Banias) */
return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 5c4f8f07c5a6..a13a2d1e444e 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -366,7 +366,7 @@ enum speedstep_processor speedstep_detect_processor(void)
} else
return SPEEDSTEP_CPU_PIII_C;
}
- /* fall through */
+ fallthrough;
default:
return 0;
}
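
The /* fall through */ comment markers replaced throughout these hunks become the fallthrough pseudo-keyword, which the kernel defines as __attribute__((__fallthrough__)) where the compiler supports it (and as an empty statement otherwise) so intentional case fall-through can be checked by the compiler. A standalone sketch of the idiom with a local compatibility define; the cascading switch mirrors the key-size style switches touched here but is illustrative only:

#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough	__attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough	do {} while (0)	/* fall through */
#endif

static int key_words(int key_bits)
{
	int words = 0;

	switch (key_bits) {
	case 256:
		words += 2;
		fallthrough;
	case 192:
		words += 2;
		fallthrough;
	case 128:
		words += 4;
		break;
	default:
		return -1;
	}
	return words;
}

int main(void)
{
	printf("%d %d %d\n", key_words(256), key_words(192), key_words(128));
	/* prints: 8 6 4 */
	return 0;
}
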
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index bae527e507e0..e1d931c457a7 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -56,9 +56,11 @@ struct read_counters_work {
static struct workqueue_struct *read_counters_wq;
-static enum cluster get_cpu_cluster(u8 cpu)
+static void get_cpu_cluster(void *cluster)
{
- return MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+
+ *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
@@ -186,8 +188,10 @@ static unsigned int tegra194_get_speed(u32 cpu)
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- int cl = get_cpu_cluster(policy->cpu);
u32 cpu;
+ u32 cl;
+
+ smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
if (cl >= data->num_clusters)
return -EINVAL;
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index ab0de27539ad..8f9fdd864391 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -86,11 +86,11 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
case DRA76_EFUSE_HAS_ALL_MPU_OPP:
calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
- /* Fall through */
+ fallthrough;
case DRA7_EFUSE_HAS_ALL_MPU_OPP:
case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
- /* Fall through */
+ fallthrough;
case DRA7_EFUSE_HAS_OD_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 87197319ab06..04becd70cc41 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
+#include <linux/mmu_context.h>
#include <trace/events/power.h>
#include "cpuidle.h"
@@ -145,21 +146,24 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* executing it contains RCU usage regarded as invalid in the idle
* context, so tell RCU about that.
*/
- RCU_NONIDLE(tick_freeze());
+ tick_freeze();
/*
* The state used here cannot be a "coupled" one, because the "coupled"
* cpuidle mechanism enables interrupts and doing that with timekeeping
* suspended is generally unsafe.
*/
stop_critical_timings();
+ rcu_idle_enter();
drv->states[index].enter_s2idle(dev, drv, index);
- WARN_ON(!irqs_disabled());
+ if (WARN_ON_ONCE(!irqs_disabled()))
+ local_irq_disable();
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
* first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
- RCU_NONIDLE(tick_unfreeze());
+ rcu_idle_exit();
+ tick_unfreeze();
start_critical_timings();
time_end = ns_to_ktime(local_clock());
@@ -225,19 +229,24 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
broadcast = false;
}
+ if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(dev->cpu);
+
/* Take note of the planned idle state. */
sched_idle_set_state(target_state);
- trace_cpu_idle_rcuidle(index, dev->cpu);
+ trace_cpu_idle(index, dev->cpu);
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
+ rcu_idle_enter();
entered_state = target_state->enter(dev, drv, index);
+ rcu_idle_exit();
start_critical_timings();
sched_clock_idle_wakeup_event();
time_end = ns_to_ktime(local_clock());
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+ trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index aa3a4ed07a66..52a9b7cf6576 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -873,6 +873,9 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
select HW_RANDOM
select SG_SPLIT
help
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 1a46eeddf082..809c3033ca74 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2310,7 +2310,7 @@ static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
ret = 0;
- /* Fallthrough */
+ fallthrough;
default:
artpec6_crypto_common_destroy(&req_ctx->common);
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index dc5fda522719..4fe7898c8561 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -90,11 +90,11 @@ static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- /* Fall through */
+ fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- /* Fall through */
+ fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 91dee616d15e..c5cce024886a 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -135,7 +135,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
break;
/* update to the next state and also initialize TCB */
tx_info->connection_state = new_state;
- /* FALLTHRU */
+ fallthrough;
case KTLS_CONN_ACT_OPEN_RPL:
/* if we are stuck in this state, means tcb init might not
* received by HW, try sending it again.
@@ -150,7 +150,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
break;
/* update to the next state and check if l2t_state is valid */
tx_info->connection_state = new_state;
- /* FALLTHRU */
+ fallthrough;
case KTLS_CONN_SET_TCB_RPL:
/* Check if l2t state is valid, then move to ready state. */
if (cxgb4_check_l2t_valid(tx_info->l2te)) {
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index cbc3d7869ebe..c80baf1ad90b 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -140,11 +140,11 @@ static inline int setup_sgio_components(struct pci_dev *pdev,
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- /* Fall through */
+ fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- /* Fall through */
+ fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 1c8ca151a963..ec9b390276d6 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -131,9 +131,10 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
ADF_CSR_WR(mailbox, mb_offset, 1);
- ret = readl_poll_timeout(mailbox + mb_offset, status,
- status == 0, ADF_ADMIN_POLL_DELAY_US,
- ADF_ADMIN_POLL_TIMEOUT_US);
+ ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
+ ADF_ADMIN_POLL_DELAY_US,
+ ADF_ADMIN_POLL_TIMEOUT_US, true,
+ mailbox, mb_offset);
if (ret < 0) {
/* Response timeout */
dev_err(&GET_DEV(accel_dev),
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 519fd5acf713..8b090b7ae8c6 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -340,7 +340,7 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
/* VF is newer than PF and decides whether it is compatible */
if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
break;
- /* fall through */
+ fallthrough;
case ADF_PF2VF_VF_INCOMPATIBLE:
dev_err(&GET_DEV(accel_dev),
"PF (vers %d) and VF (vers %d) are not compatible\n",
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index bff759e2f811..00c615f9f9a8 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -752,7 +752,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -762,7 +762,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
@@ -772,7 +772,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index f22f6fa612b3..9866c2a5e9a7 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -314,17 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
case CRYP_KEY_SIZE_256:
ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_192:
ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_128:
ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
- /* Fall through */
+ fallthrough;
default:
ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
@@ -364,17 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data,
case CRYP_KEY_SIZE_256:
writel_relaxed(ctx->key_4_l, &reg->key_4_l);
writel_relaxed(ctx->key_4_r, &reg->key_4_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_192:
writel_relaxed(ctx->key_3_l, &reg->key_3_l);
writel_relaxed(ctx->key_3_r, &reg->key_3_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_128:
writel_relaxed(ctx->key_2_l, &reg->key_2_l);
writel_relaxed(ctx->key_2_r, &reg->key_2_r);
- /* Fall through */
+ fallthrough;
default:
writel_relaxed(ctx->key_1_l, &reg->key_1_l);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0c66d6193ca2..080955a1dd9c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -204,8 +204,8 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
u32 status;
int err;
- virtio_cread(vcrypto->vdev,
- struct virtio_crypto_config, status, &status);
+ virtio_cread_le(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
/*
* Unknown status bits would be a host error and the driver
@@ -323,31 +323,31 @@ static int virtcrypto_probe(struct virtio_device *vdev)
if (!vcrypto)
return -ENOMEM;
- virtio_cread(vdev, struct virtio_crypto_config,
+ virtio_cread_le(vdev, struct virtio_crypto_config,
max_dataqueues, &max_data_queues);
if (max_data_queues < 1)
max_data_queues = 1;
- virtio_cread(vdev, struct virtio_crypto_config,
- max_cipher_key_len, &max_cipher_key_len);
- virtio_cread(vdev, struct virtio_crypto_config,
- max_auth_key_len, &max_auth_key_len);
- virtio_cread(vdev, struct virtio_crypto_config,
- max_size, &max_size);
- virtio_cread(vdev, struct virtio_crypto_config,
- crypto_services, &crypto_services);
- virtio_cread(vdev, struct virtio_crypto_config,
- cipher_algo_l, &cipher_algo_l);
- virtio_cread(vdev, struct virtio_crypto_config,
- cipher_algo_h, &cipher_algo_h);
- virtio_cread(vdev, struct virtio_crypto_config,
- hash_algo, &hash_algo);
- virtio_cread(vdev, struct virtio_crypto_config,
- mac_algo_l, &mac_algo_l);
- virtio_cread(vdev, struct virtio_crypto_config,
- mac_algo_h, &mac_algo_h);
- virtio_cread(vdev, struct virtio_crypto_config,
- aead_algo, &aead_algo);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
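
The driver now reads its config space with virtio_cread_le(), which treats the fields as little-endian regardless of guest byte order, as required for non-legacy VIRTIO 1.x config layouts (plain virtio_cread() follows the legacy/native transport endianness instead). A minimal usage sketch with an illustrative config structure, not taken from the virtio-crypto spec:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct my_virtio_config {
	__le32 max_queues;	/* illustrative field */
};

static u32 my_read_max_queues(struct virtio_device *vdev)
{
	u32 max_queues;

	/* virtio_cread_le() converts from little-endian to CPU order. */
	virtio_cread_le(vdev, struct my_virtio_config, max_queues,
			&max_queues);
	return max_queues;
}
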
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index f50828526331..32642634c1bb 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id;
if (blocksize != PAGE_SIZE) {
- pr_debug("%s: error: unsupported blocksize for dax\n",
+ pr_info("%s: error: unsupported blocksize for dax\n",
bdevname(bdev, buf));
return false;
}
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
- pr_debug("%s: error: unaligned partition for dax\n",
+ pr_info("%s: error: unaligned partition for dax\n",
bdevname(bdev, buf));
return false;
}
@@ -95,7 +95,13 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) {
- pr_debug("%s: error: unaligned partition for dax\n",
+ pr_info("%s: error: unaligned partition for dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
+ if (!dax_dev && !bdev_dax_supported(bdev, blocksize)) {
+ pr_debug("%s: error: dax unsupported by block device\n",
bdevname(bdev, buf));
return false;
}
@@ -103,11 +109,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
- dax_read_unlock(id);
if (len < 1 || len2 < 1) {
- pr_debug("%s: error: dax access failed (%ld)\n",
+ pr_info("%s: error: dax access failed (%ld)\n",
bdevname(bdev, buf), len < 1 ? len : len2);
+ dax_read_unlock(id);
return false;
}
@@ -137,9 +143,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
put_dev_pagemap(end_pgmap);
}
+ dax_read_unlock(id);
if (!dax_enabled) {
- pr_debug("%s: error: dax support not enabled\n",
+ pr_info("%s: error: dax support not enabled\n",
bdevname(bdev, buf));
return false;
}
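
Besides promoting the diagnostics to pr_info(), the dax change keeps dax_read_lock() held across the whole dax_direct_access() inspection and makes every early-return path unlock. A minimal sketch of that bracketing, with illustrative names:

#include <linux/dax.h>
#include <linux/pfn_t.h>

static bool my_probe_dax_page(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long len;
	int id;

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (len < 1) {
		/* Every exit path must drop the read lock it took. */
		dax_read_unlock(id);
		return false;
	}

	/* ... inspect kaddr/pfn while the lock is still held ... */

	dax_read_unlock(id);
	return true;
}
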
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 07f5273207e7..434a3314fb0e 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -52,12 +52,6 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
-struct lock_class_key reservation_seqcount_class;
-EXPORT_SYMBOL(reservation_seqcount_class);
-
-const char reservation_seqcount_string[] = "reservation_seqcount";
-EXPORT_SYMBOL(reservation_seqcount_string);
-
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
@@ -143,9 +137,8 @@ subsys_initcall(dma_resv_lockdep);
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
+ seqcount_ww_mutex_init(&obj->seq, &obj->lock);
- __seqcount_init(&obj->seq, reservation_seqcount_string,
- &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
@@ -275,7 +268,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
- preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < count; ++i) {
@@ -297,7 +289,6 @@ replace:
smp_store_mb(fobj->shared_count, count);
write_seqcount_end(&obj->seq);
- preempt_enable();
dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -324,14 +315,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (fence)
dma_fence_get(fence);
- preempt_disable();
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
if (old)
old->shared_count = 0;
write_seqcount_end(&obj->seq);
- preempt_enable();
/* inplace update, no shared fences */
while (i--)
@@ -409,13 +398,11 @@ retry:
src_list = dma_resv_get_list(dst);
old = dma_resv_get_excl(dst);
- preempt_disable();
write_seqcount_begin(&dst->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(dst->fence_excl, new);
RCU_INIT_POINTER(dst->fence, dst_list);
write_seqcount_end(&dst->seq);
- preempt_enable();
dma_resv_list_free(src_list);
dma_fence_put(old);
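
dma_resv now uses a seqcount with an associated ww_mutex (seqcount_ww_mutex_init()), so lockdep can check that writers hold the reservation lock and the manual preempt_disable()/preempt_enable() bracketing is no longer needed. A minimal sketch of the associated-lock seqcount pattern, using a plain mutex and illustrative names:

#include <linux/seqlock.h>
#include <linux/mutex.h>

struct my_obj {
	struct mutex lock;
	seqcount_mutex_t seq;	/* writers must hold ->lock */
	u64 value;
};

static void my_obj_init(struct my_obj *obj)
{
	mutex_init(&obj->lock);
	seqcount_mutex_init(&obj->seq, &obj->lock);
}

static void my_obj_update(struct my_obj *obj, u64 v)
{
	mutex_lock(&obj->lock);
	/* No preempt_disable() needed: the seqcount knows its write
	 * serialization lock is a sleeping lock and handles the
	 * preemption concern internally.
	 */
	write_seqcount_begin(&obj->seq);
	obj->value = v;
	write_seqcount_end(&obj->seq);
	mutex_unlock(&obj->lock);
}
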
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9adc7a2fa3d3..a24882ba3764 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1767,7 +1767,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
default:
dev_err(&pl08x->adev->dev,
"illegal burst size for memcpy, set to 1\n");
- /* Fall through */
+ fallthrough;
case PL08X_BURST_SZ_1:
cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
@@ -1806,7 +1806,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
default:
dev_err(&pl08x->adev->dev,
"illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case PL08X_BUS_WIDTH_8_BITS:
cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1850,7 +1850,7 @@ static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x)
default:
dev_err(&pl08x->adev->dev,
"illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case PL08X_BUS_WIDTH_8_BITS:
cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
@@ -2612,7 +2612,7 @@ static int pl08x_of_probe(struct amba_device *adev,
switch (val) {
default:
dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
- /* Fall through */
+ fallthrough;
case 1:
pd->memcpy_burst_size = PL08X_BURST_SZ_1;
break;
@@ -2647,7 +2647,7 @@ static int pl08x_of_probe(struct amba_device *adev,
switch (val) {
default:
dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case 8:
pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
break;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ad72b3f42ffa..e342cf52d296 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,7 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
- /* Fall through */
+ fallthrough;
case FSL_DMA_IP_83XX:
chan->toggle_ext_start = fsl_chan_toggle_ext_start;
chan->set_src_loop_size = fsl_chan_set_src_loop_size;
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 56f18ae99233..308bed0a560a 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -205,10 +205,10 @@ struct fsldma_chan {
#else
static u64 fsl_ioread64(const u64 __iomem *addr)
{
- u32 fsl_addr = lower_32_bits(addr);
- u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32;
+ u32 val_lo = in_le32((u32 __iomem *)addr);
+ u32 val_hi = in_le32((u32 __iomem *)addr + 1);
- return fsl_addr_hi | in_le32((u32 *)fsl_addr);
+ return ((u64)val_hi << 32) + val_lo;
}
static void fsl_iowrite64(u64 val, u64 __iomem *addr)
@@ -219,10 +219,10 @@ static void fsl_iowrite64(u64 val, u64 __iomem *addr)
static u64 fsl_ioread64be(const u64 __iomem *addr)
{
- u32 fsl_addr = lower_32_bits(addr);
- u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32;
+ u32 val_hi = in_be32((u32 __iomem *)addr);
+ u32 val_lo = in_be32((u32 __iomem *)addr + 1);
- return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1));
+ return ((u64)val_hi << 32) + val_lo;
}
static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
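
The old fsl_ioread64{,be}() helpers truncated the __iomem pointer to its low 32 bits before dereferencing it, which is broken on 64-bit builds; the fix reads each 32-bit half through the original pointer at the proper offset and assembles the result. A minimal sketch of that split-read pattern with generic accessors (names and offsets are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* Read a 64-bit value exposed as {high, low} 32-bit registers. */
static u64 my_read64_split(const void __iomem *base)
{
	u32 hi = readl(base);		/* high word at offset 0x0 */
	u32 lo = readl(base + 4);	/* low word at offset 0x4 */

	return ((u64)hi << 32) | lo;
}
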
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 5c0fb3134825..88717506c1f6 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -556,7 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
* We fall-through here intentionally, since a 2D transfer is
* similar to MEMCPY just adding the 2D slot configuration.
*/
- /* Fall through */
+ fallthrough;
case IMXDMA_DESC_MEMCPY:
imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h
index c499c9578f00..d44eabb6f5eb 100644
--- a/drivers/dma/iop-adma.h
+++ b/drivers/dma/iop-adma.h
@@ -496,7 +496,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
}
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
src_cnt = 24;
- /* fall through */
+ fallthrough;
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
@@ -510,7 +510,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
src_cnt = 16;
- /* fall through */
+ fallthrough;
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
@@ -522,7 +522,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
}
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
src_cnt = 8;
- /* fall through */
+ fallthrough;
case 2 ... 8:
shift = 1;
for (i = 0; i < src_cnt; i++) {
@@ -602,19 +602,19 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
case 25 ... 32:
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 1 ... 8:
if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 74df621402e1..ca4e0930207a 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -483,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
default:
pr_warn("%s(): invalid bus width %u\n", __func__, width);
- /* fall through */
+ fallthrough;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
size = burst;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2c508ee672b9..9b69716172a4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1061,16 +1061,16 @@ static bool _start(struct pl330_thread *thrd)
if (_state(thrd) == PL330_STATE_KILLING)
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_FAULTING:
_stop(thrd);
- /* fall through */
+ fallthrough;
case PL330_STATE_KILLING:
case PL330_STATE_COMPLETING:
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_STOPPED:
return _trigger(thrd);
@@ -1121,7 +1121,6 @@ static u32 _emit_load(unsigned int dry_run, u8 buf[],
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_MEM_TO_DEV:
off += _emit_LD(dry_run, &buf[off], cond);
break;
@@ -1155,7 +1154,6 @@ static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _emit_ST(dry_run, &buf[off], cond);
break;
@@ -1216,7 +1214,6 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
cond);
@@ -1266,7 +1263,6 @@ static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2deeaab078a4..788d696323bb 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -383,7 +383,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
switch (desc->mark) {
case DESC_COMPLETED:
desc->mark = DESC_WAITING;
- /* Fall through */
+ fallthrough;
case DESC_WAITING:
if (head_acked)
async_tx_ack(&desc->async_tx);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 6262f6370c5d..fcc08bbf6945 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3375,7 +3375,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
pvt->ops = &family_types[F17_M70H_CPUS].ops;
break;
}
- /* fall through */
+ fallthrough;
case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index da60c29468a7..54ebc8afc6b1 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -55,6 +55,8 @@ static DEFINE_SPINLOCK(ghes_lock);
static bool __read_mostly force_load;
module_param(force_load, bool, 0);
+static bool system_scanned;
+
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
u8 type;
@@ -225,14 +227,12 @@ static void enumerate_dimms(const struct dmi_header *dh, void *arg)
static void ghes_scan_system(void)
{
- static bool scanned;
-
- if (scanned)
+ if (system_scanned)
return;
dmi_walk(enumerate_dimms, &ghes_hw);
- scanned = true;
+ system_scanned = true;
}
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
@@ -631,6 +631,8 @@ void ghes_edac_unregister(struct ghes *ghes)
mutex_lock(&ghes_reg_mutex);
+ system_scanned = false;
+
if (!refcount_dec_and_test(&ghes_refcount))
goto unlock;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 5860ca41185c..2acd9f9284a2 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv)
- tp_event = HW_EVENT_ERR_FATAL;
- else
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ else
+ tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index d68346a8e141..ebe50996cc42 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -170,6 +170,8 @@
(n << (28 + (2 * skl) - PAGE_SHIFT))
static int nr_channels;
+static struct pci_dev *mci_pdev;
+static int ie31200_registered = 1;
struct ie31200_priv {
void __iomem *window;
@@ -538,12 +540,16 @@ fail_free:
static int ie31200_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- edac_dbg(0, "MC:\n");
+ int rc;
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
+ rc = ie31200_probe1(pdev, ent->driver_data);
+ if (rc == 0 && !mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
- return ie31200_probe1(pdev, ent->driver_data);
+ return rc;
}
static void ie31200_remove_one(struct pci_dev *pdev)
@@ -552,6 +558,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
struct ie31200_priv *priv;
edac_dbg(0, "\n");
+ pci_dev_put(mci_pdev);
+ mci_pdev = NULL;
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
@@ -593,17 +601,53 @@ static struct pci_driver ie31200_driver = {
static int __init ie31200_init(void)
{
+ int pci_rc, i;
+
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- return pci_register_driver(&ie31200_driver);
+ pci_rc = pci_register_driver(&ie31200_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ ie31200_registered = 0;
+ for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
+ mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
+ ie31200_pci_tbl[i].device,
+ NULL);
+ if (mci_pdev)
+ break;
+ }
+ if (!mci_pdev) {
+ edac_dbg(0, "ie31200 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
+ if (pci_rc < 0) {
+ edac_dbg(0, "ie31200 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+ return 0;
+
+fail1:
+ pci_unregister_driver(&ie31200_driver);
+fail0:
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
}
static void __exit ie31200_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&ie31200_driver);
+ if (!ie31200_registered)
+ ie31200_remove_one(mci_pdev);
}
module_init(ie31200_init);
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index fd363746f5b0..928f63a374c7 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -198,7 +198,7 @@ static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
switch (sz) {
case 8:
ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
- /* fall through */
+ fallthrough;
case 4:
ret |= _apl_rd_reg(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
int rc;
- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
+ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
HW_EVENT_ERR_CORRECTED;
/*
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index d414698ca324..c5ab634cb6a4 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- tp_event = HW_EVENT_ERR_FATAL;
- } else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ } else {
+ tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 6d8d6dc626bf..2b4ce8e5ac2f 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -493,9 +493,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- tp_event = HW_EVENT_ERR_FATAL;
- } else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ } else {
+ tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index b785e936244f..80db43a22069 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation)
device->bc_implemented = BC_IMPLEMENTED;
break;
}
- /* else, fall through - to case address error */
+ fallthrough; /* to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 185b0b78b3d6..af70e74f9a7e 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -277,7 +277,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
if ((data[0] & bit) == (data[1] & bit))
continue;
- /* fall through - It's a 1394-1995 IRM, retry. */
+ fallthrough; /* It's a 1394-1995 IRM, retry */
default:
if (retry) {
retry--;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 94a13fca8267..ec68ed27b0a5 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -54,7 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
- /* fall through */
+ fallthrough;
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 439d918bbaaf..ac487c96bb71 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1097,14 +1097,14 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
rcode = RCODE_ADDRESS_ERROR;
break;
}
- /* else fall through */
+ fallthrough;
case CSR_NODE_IDS:
/*
* per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
* and 9.6, but interoperable with IEEE 1394.1-2004 bridges
*/
- /* fall through */
+ fallthrough;
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7dde21b18b04..020cb15a4d8f 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1495,7 +1495,7 @@ static int handle_at_packet(struct context *context,
packet->ack = RCODE_GENERATION;
break;
}
- /* fall through */
+ fallthrough;
default:
packet->ack = RCODE_SEND_ERROR;
@@ -3054,7 +3054,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
- /* fall through */
+ fallthrough;
case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index e7e36aab2386..b4b9ce97f415 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1136,15 +1136,14 @@ int sdei_event_handler(struct pt_regs *regs,
* access kernel memory.
* Do the same here because this doesn't come via the same entry code.
*/
- orig_addr_limit = get_fs();
- set_fs(USER_DS);
+ orig_addr_limit = force_uaccess_begin();
err = arg->callback(event_num, regs, arg->callback_arg);
if (err)
pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
event_num, smp_processor_id(), err);
- set_fs(orig_addr_limit);
+ force_uaccess_end(orig_addr_limit);
return err;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index fdd1db025dbf..3aa07c3b5136 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -381,6 +381,7 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
+ destroy_workqueue(efi_rts_wq);
return -ENOMEM;
}
@@ -424,6 +425,7 @@ err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
+ destroy_workqueue(efi_rts_wq);
return error;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 6bca70bbb43d..f735db55adc0 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -187,20 +187,28 @@ int efi_printk(const char *fmt, ...)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len = strlen(cmdline) + 1;
+ size_t len;
efi_status_t status;
char *str, *buf;
+ if (!cmdline)
+ return EFI_SUCCESS;
+
+ len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
if (status != EFI_SUCCESS)
return status;
- str = skip_spaces(memcpy(buf, cmdline, len));
+ memcpy(buf, cmdline, len - 1);
+ buf[len - 1] = '\0';
+ str = skip_spaces(buf);
while (*str) {
char *param, *val;
str = next_arg(str, &param, &val);
+ if (!val && !strcmp(param, "--"))
+ break;
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
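
efi_parse_options() now guards against a NULL command line, bounds the copy with strnlen(), and NUL-terminates the buffer itself, so an oversized or unterminated firmware-provided string cannot overrun the allocation. A minimal sketch of that bounded-copy idiom; the buffer size is an illustrative assumption, not a value from the patch:

#include <linux/string.h>

#define MY_CMDLINE_MAX	2048	/* illustrative bound */

/* Copy an untrusted string into dst (>= MY_CMDLINE_MAX bytes) and
 * guarantee termination even if src is not NUL-terminated.
 */
static size_t my_copy_cmdline(char *dst, const char *src)
{
	size_t len;

	if (!src) {
		dst[0] = '\0';
		return 0;
	}

	len = strnlen(src, MY_CMDLINE_MAX - 1);
	memcpy(dst, src, len);
	dst[len] = '\0';
	return len;
}
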
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 53cee17d0115..722af9ee53d6 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -65,36 +65,18 @@ struct ti_sci_xfers_info {
};
/**
- * struct ti_sci_rm_type_map - Structure representing TISCI Resource
- * management representation of dev_ids.
- * @dev_id: TISCI device ID
- * @type: Corresponding id as identified by TISCI RM.
- *
- * Note: This is used only as a work around for using RM range apis
- * for AM654 SoC. For future SoCs dev_id will be used as type
- * for RM range APIs. In order to maintain ABI backward compatibility
- * type is not being changed for AM654 SoC.
- */
-struct ti_sci_rm_type_map {
- u32 dev_id;
- u16 type;
-};
-
-/**
* struct ti_sci_desc - Description of SoC integration
* @default_host_id: Host identifier representing the compute entity
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msgs: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
- * @rm_type_map: RM resource type mapping structure.
*/
struct ti_sci_desc {
u8 default_host_id;
int max_rx_timeout_ms;
int max_msgs;
int max_msg_size;
- struct ti_sci_rm_type_map *rm_type_map;
};
/**
@@ -1710,33 +1692,6 @@ fail:
return ret;
}
-static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
- u16 *type)
-{
- struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
- bool found = false;
- int i;
-
- /* If map is not provided then assume dev_id is used as type */
- if (!rm_type_map) {
- *type = dev_id;
- return 0;
- }
-
- for (i = 0; rm_type_map[i].dev_id; i++) {
- if (rm_type_map[i].dev_id == dev_id) {
- *type = rm_type_map[i].type;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return 0;
-}
-
/**
* ti_sci_get_resource_range - Helper to get a range of resources assigned
* to a host. Resource is uniquely identified by
@@ -1760,7 +1715,6 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
- u16 type;
int ret = 0;
if (IS_ERR(handle))
@@ -1780,15 +1734,9 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
return ret;
}
- ret = ti_sci_get_resource_type(info, dev_id, &type);
- if (ret) {
- dev_err(dev, "rm type lookup failed for %u\n", dev_id);
- goto fail;
- }
-
req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
req->secondary_host = s_host;
- req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+ req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
ret = ti_sci_do_xfer(info, xfer);
@@ -3260,61 +3208,50 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
/**
- * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device
* @handle: TISCI handle
* @dev: Device pointer to which the resource is assigned
* @dev_id: TISCI device id to which the resource is assigned
- * @of_prop: property name by which the resource are represented
+ * @sub_types: Array of sub_types assigned corresponding to device
+ * @sets: Number of sub_types
*
* Return: Pointer to ti_sci_resource if all went well else appropriate
* error pointer.
*/
-struct ti_sci_resource *
-devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
- struct device *dev, u32 dev_id, char *of_prop)
+static struct ti_sci_resource *
+devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, u32 *sub_types,
+ u32 sets)
{
struct ti_sci_resource *res;
bool valid_set = false;
- u32 resource_subtype;
int i, ret;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
- sizeof(u32));
- if (ret < 0) {
- dev_err(dev, "%s resource type ids not available\n", of_prop);
- return ERR_PTR(ret);
- }
- res->sets = ret;
-
+ res->sets = sets;
res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
GFP_KERNEL);
if (!res->desc)
return ERR_PTR(-ENOMEM);
for (i = 0; i < res->sets; i++) {
- ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
- &resource_subtype);
- if (ret)
- return ERR_PTR(-EINVAL);
-
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
- resource_subtype,
+ sub_types[i],
&res->desc[i].start,
&res->desc[i].num);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
- dev_id, resource_subtype);
+ dev_id, sub_types[i]);
res->desc[i].start = 0;
res->desc[i].num = 0;
continue;
}
dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
- dev_id, resource_subtype, res->desc[i].start,
+ dev_id, sub_types[i], res->desc[i].start,
res->desc[i].num);
valid_set = true;
@@ -3332,6 +3269,62 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
return ERR_PTR(-EINVAL);
}
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop: property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 *sub_types;
+ int sets;
+
+ sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(sets);
+ }
+
+ sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
+ if (!sub_types)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
+ res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
+ sets);
+
+ kfree(sub_types);
+ return res;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
+
+/**
+ * devm_ti_sci_get_resource() - Get a resource range assigned to the device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_type: TISCI resource subtype representing the resource.
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type)
+{
+ return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
@@ -3352,17 +3345,6 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
.max_msgs = 20,
.max_msg_size = 64,
- .rm_type_map = NULL,
-};
-
-static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
- {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
- {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
- {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
- {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
- {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
- {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
- {.dev_id = 0, .type = 0x000}, /* end of table */
};
/* Description for AM654 */
@@ -3373,7 +3355,6 @@ static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
.max_msgs = 20,
.max_msg_size = 60,
- .rm_type_map = ti_sci_am654_rm_type_map,
};
static const struct of_device_id ti_sci_of_match[] = {
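
With the AM654 rm_type_map work-around gone, resource ranges are requested directly by TISCI device id, and the new devm_ti_sci_get_resource() exported above lets a client pass a single subtype without a DT property. A minimal usage sketch; the dev_id and sub_type values below are placeholders, not taken from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int my_request_ring_range(struct device *dev,
				 const struct ti_sci_handle *handle)
{
	struct ti_sci_resource *res;

	/* 187 / 0x0 are illustrative dev_id / sub_type values. */
	res = devm_ti_sci_get_resource(handle, dev, 187, 0x0);
	if (IS_ERR(res))
		return PTR_ERR(res);

	dev_dbg(dev, "assigned %u resource(s)\n",
		ti_sci_get_num_resources(res));
	return 0;
}
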
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index d16645c1d8d9..3aa45934d60c 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -303,16 +303,16 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 879db23d8454..bf08b4561f36 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -611,16 +611,16 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 53fae02c40ad..d5359341cc6b 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -129,7 +129,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
case IRQ_TYPE_LEVEL_HIGH:
polarity |= mask;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type |= mask;
break;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 8c9757774010..ad61daf6c212 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -617,14 +617,12 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
sprd_eic->chip.set = sprd_eic_set;
- /* fall-through */
+ fallthrough;
case SPRD_EIC_ASYNC:
- /* fall-through */
case SPRD_EIC_SYNC:
sprd_eic->chip.get = sprd_eic_get;
break;
case SPRD_EIC_LATCH:
- /* fall-through */
default:
break;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 6c48809d0505..b0155d6007c8 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -308,7 +308,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
edge_det = !!(ret & mask);
- /* fall through */
+ fallthrough;
case STMPE1801:
rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
@@ -321,7 +321,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
fall = !!(ret & mask);
- /* fall through */
+ fallthrough;
case STMPE801:
case STMPE1600:
irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9276051663da..54ca3c18b291 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1264,7 +1264,7 @@ static int acpi_gpio_package_count(const union acpi_object *obj)
switch (element->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
element += 3;
- /* Fallthrough */
+ fallthrough;
case ACPI_TYPE_INTEGER:
element++;
count++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 7e59e473a190..cdea1338c8dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -152,7 +152,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c7fd0c47b254..1102de76d876 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t sdma_engine_reg_base[2] = {
- SOC15_REG_OFFSET(SDMA0, 0,
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA1, 0,
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
- };
- uint32_t retval = sdma_engine_reg_base[engine_id]
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
- queue_id, retval);
+ queue_id, sdma_rlc_reg_offset);
- return retval;
+ return sdma_rlc_reg_offset;
}
static inline struct v9_mqd *get_mqd(void *mqd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e5a5ba869eb4..a58af513c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_count = k;
/* Install the new fence list, seqcount provides the barriers */
- preempt_disable();
write_seqcount_begin(&resv->seq);
RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
- preempt_enable();
/* Drop the references to the removed fences or move them to ef_list */
for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index aa5b54e5a1d7..eb7cfe87042e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2574,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ adev->ip_blocks[i].status.hw = false;
+
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
int j;
struct amdgpu_ip_block *block;
@@ -2581,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
for (j = 0; j < adev->num_ip_blocks; j++) {
block = &adev->ip_blocks[j];
- block->status.hw = false;
if (block->version->type != ip_order[i] ||
!block->status.valid)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0047da06041f..414548064648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -179,6 +179,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
/* enable runpm if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->runpm = true;
@@ -678,8 +679,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
* in the bitfields */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
+ else if (se_num >= AMDGPU_GFX_MAX_SE)
+ return -EINVAL;
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
+ return -EINVAL;
if (info->read_mmr_reg.count > 128)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5f20cadee343..e4dbf14320b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -3212,6 +3212,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip crit temp on APU */
+ if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
+ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index fe7d39bb975d..d8c6520ff74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -193,12 +193,18 @@ static int psp_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
psp_memory_training_fini(&adev->psp);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
+ if (adev->psp.sos_fw) {
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
+ }
+ if (adev->psp.asd_fw) {
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ }
+ if (adev->psp.ta_fw) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ }
if (adev->asic_type == CHIP_NAVI10)
psp_sysfs_fini(adev);
@@ -409,11 +415,28 @@ static int psp_clear_vf_fw(struct psp_context *psp)
return ret;
}
+static bool psp_skip_tmr(struct psp_context *psp)
+{
+ switch (psp->adev->asic_type) {
+ case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
+ * Already set up by host driver.
+ */
+ if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
+ return 0;
+
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -499,8 +522,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) ||
- (psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1987,7 +2009,7 @@ static int psp_suspend(void *handle)
ret = psp_tmr_terminate(psp);
if (ret) {
- DRM_ERROR("Falied to terminate tmr\n");
+ DRM_ERROR("Failed to terminate tmr\n");
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e10f02ed3f65..1bedb416eebd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1243,7 +1243,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
if (!obj || !obj->ent)
return;
- debugfs_remove(obj->ent);
obj->ent = NULL;
put_obj(obj);
}
@@ -1257,7 +1256,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove_recursive(con->dir);
con->dir = NULL;
}
/* debugfs end */
@@ -1618,7 +1616,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
data = con->eh_data;
save_count = data->count - control->num_recs;
/* only new entries are saved */
- if (save_count > 0)
+ if (save_count > 0) {
if (amdgpu_ras_eeprom_process_recods(control,
&data->bps[control->num_recs],
true,
@@ -1627,6 +1625,9 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
return -EIO;
}
+ dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 134cc36e30c5..0739e259bf91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -462,7 +462,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
unsigned int pages;
int i, r;
- *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 61e89247faf3..037a187aa42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3082,7 +3082,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3127,7 +3127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3158,7 +3158,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
@@ -7263,10 +7263,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
-
- /* only for Vega10 & Raven1 */
- data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -7529,6 +7527,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 33f1c4a46ebe..88f63d7ea371 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3250,7 +3250,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
- /* fall through */
+ fallthrough;
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cb9d60a4e05e..b95f22262a90 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index fa0bca3e1f73..5d2505956f84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -135,6 +135,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +196,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -326,6 +338,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6e4f3ff4810f..b67ba38a195f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1297,7 +1297,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_VEGA10:
if (amdgpu_sriov_vf(adev))
break;
- /* fall through */
+ fallthrough;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 42f1a516005e..c41e5590a701 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -49,12 +49,11 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
static int jpeg_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+ u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ return -ENOENT;
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- return -ENOENT;
- }
adev->jpeg.num_jpeg_inst = 1;
jpeg_v3_0_set_dec_ring_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 757fa8e83f5b..c79fc54bc3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -134,6 +134,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -189,6 +195,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(MMHUB, 0,
mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
@@ -318,6 +330,13 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ea69ae76773e..ca11253e787c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -97,6 +97,49 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags, address, data;
+ u64 r;
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* read low 32 bit */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ r = RREG32(data);
+
+ /* read high 32 bit*/
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ r |= ((u64)RREG32(data) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* write low 32 bit */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v & 0xffffffffULL));
+ (void)RREG32(data);
+
+ /* write high 32 bit */
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v >> 32));
+ (void)RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -319,10 +362,16 @@ nv_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
- if (smu_baco_is_support(smu))
- return AMD_RESET_METHOD_BACO;
- else
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
return AMD_RESET_METHOD_MODE1;
+ default:
+ if (smu_baco_is_support(smu))
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_MODE1;
+ }
}
static int nv_asic_reset(struct amdgpu_device *adev)
@@ -673,6 +722,8 @@ static int nv_common_early_init(void *handle)
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
adev->pcie_wreg = &nv_pcie_wreg;
+ adev->pcie_rreg64 = &nv_pcie_rreg64;
+ adev->pcie_wreg64 = &nv_pcie_wreg64;
/* TODO: will add them during VCN v2 implementation */
adev->uvd_ctx_rreg = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index d488d250805d..e16874f30d5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
break;
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
err = psp_init_ta_microcode(&adev->psp, chip_name);
if (err)
return err;
break;
- case CHIP_NAVY_FLOUNDER:
- break;
default:
BUG();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ea914b256ebd..b5986d19dc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6196,7 +6196,7 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
if (current_link_speed == AMDGPU_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case AMDGPU_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 910a4a32ff78..63e5547cfb16 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1659,7 +1659,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_dec_ring_test_ring,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 407065cd8d57..b51c527a3f0d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -97,6 +97,8 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
@@ -1185,10 +1187,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
dmub_asic = DMUB_ASIC_DCN30;
fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
break;
+ case CHIP_NAVY_FLOUNDER:
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
#endif
default:
@@ -2191,6 +2196,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
+ drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -2828,12 +2834,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
&dm_atomic_state_funcs);
r = amdgpu_display_modeset_create_props(adev);
- if (r)
+ if (r) {
+ dc_release_state(state->context);
+ kfree(state);
return r;
+ }
r = amdgpu_dm_audio_init(adev);
- if (r)
+ if (r) {
+ dc_release_state(state->context);
+ kfree(state);
return r;
+ }
return 0;
}
@@ -2850,6 +2862,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
#if defined(CONFIG_ACPI)
struct amdgpu_dm_backlight_caps caps;
+ memset(&caps, 0, sizeof(caps));
+
if (dm->backlight_caps.caps_valid)
return;
@@ -2888,51 +2902,50 @@ static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
return rc ? 0 : 1;
}
-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
- const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned *min, unsigned *max)
{
- u32 min, max, conversion_pace;
- u32 brightness = user_brightness;
-
if (!caps)
- goto out;
+ return 0;
- if (!caps->aux_support) {
- max = caps->max_input_signal;
- min = caps->min_input_signal;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- conversion_pace = 0x101;
- brightness =
- user_brightness
- * conversion_pace
- * (max - min)
- / AMDGPU_MAX_BL_LEVEL
- + min * conversion_pace;
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
} else {
- /* TODO
- * We are doing a linear interpolation here, which is OK but
- * does not provide the optimal result. We probably want
- * something close to the Perceptual Quantizer (PQ) curve.
- */
- max = caps->aux_max_input_signal;
- min = caps->aux_min_input_signal;
-
- brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
- + user_brightness * max;
- // Multiple the value by 1000 since we use millinits
- brightness *= 1000;
- brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
}
+ return 1;
+}
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned min, max;
-out:
- return brightness;
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
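As a quick sanity check of the conversion helpers above, the standalone sketch below reproduces the rescaling arithmetic with concrete numbers for the PWM case (8-bit firmware limits 12..255 scaled by 0x101 to 16-bit): a user level of 128 maps into the min..max range and back to 128. The constant and div_round_closest() are local approximations of AMDGPU_MAX_BL_LEVEL and the kernel's DIV_ROUND_CLOSEST(); treat this as an illustration, not driver code.

#include <stdio.h>
#include <stdint.h>

#define AMDGPU_MAX_BL_LEVEL 0xFF   /* assumed: user-space backlight range is 0..255 */

/* Round-to-nearest unsigned division, mirroring DIV_ROUND_CLOSEST(). */
static uint32_t div_round_closest(uint32_t n, uint32_t d)
{
        return (n + d / 2) / d;
}

/* Rescale a 0..255 user level into the firmware min..max range. */
static uint32_t from_user(uint32_t min, uint32_t max, uint32_t user)
{
        return min + div_round_closest((max - min) * user, AMDGPU_MAX_BL_LEVEL);
}

/* Rescale a min..max hardware level back into 0..255. */
static uint32_t to_user(uint32_t min, uint32_t max, uint32_t hw)
{
        if (hw < min)
                return 0;
        return div_round_closest(AMDGPU_MAX_BL_LEVEL * (hw - min), max - min);
}

int main(void)
{
        uint32_t min = 0x101 * 12, max = 0x101 * 255;
        uint32_t hw = from_user(min, max, 128);

        printf("user 128 -> hw %u -> user %u\n", hw, to_user(min, max, hw));
        return 0;
}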
@@ -2948,7 +2961,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
link = (struct dc_link *)dm->backlight_link;
- brightness = convert_brightness(&caps, bd->props.brightness);
+ brightness = convert_brightness_from_user(&caps, bd->props.brightness);
// Change brightness based on AUX property
if (caps.aux_support)
return set_backlight_via_aux(link, brightness);
@@ -2965,7 +2978,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
- return ret;
+ return convert_brightness_to_user(&dm->backlight_caps, ret);
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -8544,6 +8557,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ /* Skip connectors that are disabled or part of modeset already. */
+ if (!old_con_state->crtc && !new_con_state->crtc)
+ continue;
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
+ if (dm_old_con_state->abm_level !=
+ dm_new_con_state->abm_level)
+ new_crtc_state->connectors_changed = true;
+ }
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->asic_type >= CHIP_NAVI10) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 998f729976bf..e5a6d9115949 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -35,6 +35,7 @@
#include "dmub/dmub_srv.h"
#include "resource.h"
#include "dsc.h"
+#include "dc_link_dp.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -1150,7 +1151,7 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
return result;
}
-static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
+static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
@@ -1186,7 +1187,7 @@ static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
snprintf(rd_buf_ptr, str_len,
"%d\n",
- dsc_state.dsc_bytes_per_pixel);
+ dsc_state.dsc_bits_per_pixel);
rd_buf_ptr += str_len;
while (size) {
@@ -1460,9 +1461,9 @@ static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
.llseek = default_llseek
};
-static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
+static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
.owner = THIS_MODULE,
- .read = dp_dsc_bytes_per_pixel_read,
+ .read = dp_dsc_bits_per_pixel_read,
.llseek = default_llseek
};
@@ -1552,7 +1553,7 @@ static const struct {
{"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
{"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
- {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
+ {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
{"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
{"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e85b58f0f416..336aaa09be46 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -67,7 +67,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
- if (payload.write)
+ if (payload.write && result >= 0)
result = msg->size;
if (result < 0)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 008d4d11339d..ad394aefa5d9 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2834,6 +2834,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = bios_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
+
+ .get_atom_dc_golden_table = NULL
};
static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b8684131151d..2d5c7daaee23 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1108,6 +1108,18 @@ static enum bp_result bios_parser_enable_disp_power_gating(
action);
}
+static enum bp_result bios_parser_enable_lvtma_control(
+ struct dc_bios *dcb,
+ uint8_t uc_pwr_on)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_lvtma_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on);
+}
+
static bool bios_parser_is_accelerated_mode(
struct dc_bios *dcb)
{
@@ -2079,6 +2091,85 @@ static uint16_t bios_parser_pack_data_tables(
return 0;
}
+static struct atom_dc_golden_table_v1 *bios_get_golden_table(
+ struct bios_parser *bp,
+ uint32_t rev_major,
+ uint32_t rev_minor,
+ uint16_t *dc_golden_table_ver)
+{
+ struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL;
+ uint32_t dc_golden_offset = 0;
+ *dc_golden_table_ver = 0;
+
+ if (!DATA_TABLES(dce_info))
+ return NULL;
+
+ /* ver.4.4 or higher */
+ switch (rev_major) {
+ case 4:
+ switch (rev_minor) {
+ case 4:
+ disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl_4_4)
+ return NULL;
+ dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset;
+ *dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver;
+ break;
+ }
+ break;
+ }
+
+ if (!dc_golden_offset)
+ return NULL;
+
+ if (*dc_golden_table_ver != 1)
+ return NULL;
+
+ return GET_IMAGE(struct atom_dc_golden_table_v1,
+ dc_golden_offset);
+}
+
+static enum bp_result bios_get_atom_dc_golden_table(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+ uint16_t dc_golden_table_ver = 0;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ if (!header)
+ return BP_RESULT_UNSUPPORTED;
+
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ atom_dc_golden_table = bios_get_golden_table(bp,
+ tbl_revision.major,
+ tbl_revision.minor,
+ &dc_golden_table_ver);
+
+ if (!atom_dc_golden_table)
+ return BP_RESULT_UNSUPPORTED;
+
+ dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver;
+ dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val;
+ dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val;
+ dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val;
+ dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val;
+
+ return result;
+}
+
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -2128,6 +2219,10 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_board_layout_info = bios_get_board_layout_info,
.pack_data_tables = bios_parser_pack_data_tables,
+
+ .get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
+
+ .enable_lvtma_control = bios_parser_enable_lvtma_control
};
static bool bios_parser2_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index bed91572f82a..eb3ae5c3677c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -904,6 +904,33 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
return 0;
}
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** LVTMA CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on);
+
+static void init_enable_lvtma_control(struct bios_parser *bp)
+{
+ /* TODO: add a switch on the table version */
+ bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control;
+
+}
+
+static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ return result;
+}
+
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
{
init_dig_encoder_control(bp);
@@ -919,4 +946,5 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_dce_clock(bp);
init_get_smu_clock_info(bp);
+ init_enable_lvtma_control(bp);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 7a2af24dfe60..7bdce013cde5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -94,7 +94,8 @@ struct cmd_tbl {
struct bp_set_dce_clock_parameters *bp_params);
unsigned int (*get_smu_clock_info)(
struct bios_parser *bp, uint8_t id);
-
+ enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
+ uint8_t uc_pwr_on);
};
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 3fab9296918a..e133edc587d3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
return disp_clk_threshold;
}
-static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
+static void ramp_up_dispclk_with_dpp(
+ struct clk_mgr_internal *clk_mgr,
+ struct dc *dc,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
{
int i;
int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ /* This function changes dispclk, dppclk and dprefclk according to the
+ * bandwidth requirement. Its call stack is rv1_update_clocks -->
+ * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
+ * --> prepare_bandwidth / optimize_bandwidth. Before the DCN hardware is
+ * changed, prepare_bandwidth is called to raise clocks and watermarks
+ * enough for the change; after the DCN hardware change completes,
+ * optimize_bandwidth lowers the clocks to save power for the new DCN
+ * hardware settings.
+ *
+ * The sequence of commit_planes_for_stream is:
+ *
+ * step 1: prepare_bandwidth - raise clocks to have enough bandwidth
+ * step 2: lock_doublebuffer_enable
+ * step 3: pipe_control_lock(true) - dchubp register changes do not
+ * take effect right away
+ * step 4: apply_ctx_for_surface - program dchubp
+ * step 5: pipe_control_lock(false) - dchubp register changes take effect
+ * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
+ * for a full update, optimize clocks to save power
+ *
+ * At the end of step 1, the DCN clocks (dprefclk, dispclk, dppclk) may
+ * already be changed for the new dchubp configuration, but the real
+ * dchubps are still running with the old configuration until the end of
+ * step 5. Therefore the clock settings at step 1 must not be lower than
+ * those before step 1. This is checked by two conditions:
+ * 1. if (should_set_clock(safe_to_lower,
+ * new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
+ * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
+ * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
+ *
+ * The second condition is based on the new dchubp configuration; the
+ * dppclk for the new dchubp may differ from the dppclk before step 1.
+ * For example, before step 1 the dchubps are:
+ * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
+ * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
+ * so pipe 0 needs dppclk = dispclk.
+ *
+ * With the new pipe-split dchubp configuration:
+ * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
+ * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
+ * only dppclk = dispclk / 2 is needed.
+ *
+ * dispclk and dppclk are not protected by the OTG master lock; they take
+ * effect after step 1. During this transition dispclk stays the same, but
+ * dppclk would be halved while the old dchubp configuration is still
+ * active between step 1 and step 6. This may cause intermittent p-state
+ * warnings.
+ *
+ * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we must
+ * make sure dppclk is not lowered between steps 1 and 6.
+ * For new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, the
+ * display clock is raised, but since the ratio of new_clocks->dispclk_khz
+ * to clk_mgr_base->clks.dispclk_khz is unknown,
+ * new_clocks->dispclk_khz / 2 is not guaranteed to be equal to or higher
+ * than the old dppclk. The power-saving difference between
+ * dppclk = dispclk and dppclk = dispclk / 2 between step 1 and step 6 can
+ * be ignored, so as long as safe_to_lower == false, set dppclk = dispclk
+ * to simplify the condition check.
+ * TODO: review this change for other ASICs.
+ **/
+ if (!safe_to_lower)
+ request_dpp_div = false;
+
/* set disp clk to dpp clk threshold */
clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
@@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
/* program dispclk on = as a w/a for sleep resume clock ramping issues */
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
- ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
+ ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}
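To make the safe_to_lower handling above easier to follow, here is a small standalone sketch (outside the patch) of the decision: during prepare_bandwidth (safe_to_lower == false) the DPP divider request is dropped so dppclk stays at dispclk, and only the optimize pass may halve it. should_set_clock() below is a local approximation of the DC helper of the same name (assumed semantics: always allow raising, only allow lowering when safe), and pick_dppclk() is a made-up name for the illustration.

#include <stdbool.h>
#include <stdio.h>

static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
        return (safe_to_lower && new_khz < cur_khz) || new_khz > cur_khz;
}

static int pick_dppclk(bool safe_to_lower, int dispclk_khz, int dppclk_khz)
{
        bool request_dpp_div = dispclk_khz > dppclk_khz;

        /* During prepare_bandwidth (safe_to_lower == false) keep dppclk at
         * dispclk so the still-active pipes never see a lower dppclk. */
        if (!safe_to_lower)
                request_dpp_div = false;

        return request_dpp_div ? dispclk_khz / 2 : dispclk_khz;
}

int main(void)
{
        printf("prepare:  dppclk = %d kHz\n", pick_dppclk(false, 600000, 300000));
        printf("optimize: dppclk = %d kHz\n", pick_dppclk(true, 600000, 300000));
        printf("lower allowed? %d\n", should_set_clock(true, 400000, 600000));
        return 0;
}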
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index c664404a75d4..543afa34d87a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -94,6 +94,15 @@ int rn_get_active_display_cnt_wa(
return display_count;
}
+void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -516,6 +525,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
.are_clock_states_equal = rn_are_clock_states_equal,
+ .set_low_power_state = rn_set_low_power_state,
.notify_wm_ranges = rn_notify_wm_ranges,
.notify_link_rate_change = rn_notify_link_rate_change,
};
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index d94fdc52be37..9133646f6d5f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -323,9 +323,10 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
- /* always update dtos unless clock is lowered and not safe to lower */
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
+ * that we do not lower dto when it is not safe to lower. We do not need to
+ * compare the current and new dppclk before calling this function. */
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ef0b5941bc50..92eb1ca1634f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1250,6 +1250,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ dc_allow_idle_optimizations(dc, false);
+#endif
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1838,6 +1841,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (dc->idle_optimizations_allowed)
+ overall_type = UPDATE_TYPE_FULL;
+
+#endif
if (stream_status == NULL || stream_status->plane_count != surface_count)
overall_type = UPDATE_TYPE_FULL;
@@ -2306,8 +2314,14 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
- dc->hwss.prepare_bandwidth(dc, context);
+ if (update_type == UPDATE_TYPE_FULL) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ dc_allow_idle_optimizations(dc, false);
+
+#endif
+ if (dc->optimize_seamless_boot_streams == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
context_clock_trace(dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 02742cca4d84..437d1a7a16fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -763,6 +763,7 @@ static bool detect_dp(struct dc_link *link,
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
sink_caps,
audio_support);
+ link->dpcd_caps.dongle_type = sink_caps->dongle_type;
}
return true;
@@ -1540,6 +1541,9 @@ static bool dc_link_construct(struct dc_link *link,
}
}
+ if (bios->funcs->get_atom_dc_golden_table)
+ bios->funcs->get_atom_dc_golden_table(bios);
+
/*
* TODO check if GPIO programmed correctly
*
@@ -3102,6 +3106,9 @@ void core_link_enable_stream(
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+#endif
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
@@ -3136,8 +3143,8 @@ void core_link_enable_stream(
pipe_ctx->stream->link->link_state_valid = true;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
- pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
#endif
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
@@ -3276,14 +3283,13 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
core_link_set_avmute(pipe_ctx, true);
}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
-
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 5cb7b834e459..b2be6ad5101d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1133,6 +1133,44 @@ static inline enum link_training_result perform_link_training_int(
return status;
}
+static enum link_training_result check_link_loss_status(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_status lane_status;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ /* parse lane status */
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /*
+ * check lane status
+ */
+ lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* If channel equalization, clock recovery or
+ * symbol lock is lost on any lane, consider
+ * the link as dropped: the DP sink status
+ * has changed.
+ */
+ status = LINK_TRAINING_LINK_LOSS;
+ break;
+ }
+ }
+
+ return status;
+}
+
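check_link_loss_status() above reads six DPCD bytes starting at DP_SINK_COUNT (0200h), so bytes 2 and 3 of the buffer are LANE0_1_STATUS (0202h) and LANE2_3_STATUS (0203h), one status nibble per lane. The standalone sketch below shows that unpacking with made-up data; get_nibble() is a hypothetical stand-in for the driver's get_nibble_at_index() helper, and the bit layout follows the DP spec (bit 0 CR_DONE, bit 1 CHANNEL_EQ_DONE, bit 2 SYMBOL_LOCKED per lane).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t get_nibble(const uint8_t *buf, unsigned int lane)
{
        uint8_t byte = buf[lane / 2];

        return (lane & 1) ? (byte >> 4) & 0xf : byte & 0xf;
}

static bool link_lost(const uint8_t *lane_status_bytes, unsigned int lane_count)
{
        for (unsigned int lane = 0; lane < lane_count; lane++) {
                uint8_t s = get_nibble(lane_status_bytes, lane);
                bool cr_done     = s & 0x1;
                bool eq_done     = s & 0x2;
                bool symbol_lock = s & 0x4;

                if (!cr_done || !eq_done || !symbol_lock)
                        return true;    /* treat any dropped lane as link loss */
        }
        return false;
}

int main(void)
{
        /* Lane 0 fully trained (0x7), lane 1 lost symbol lock (0x3). */
        uint8_t lane01 = 0x37;

        printf("link lost: %d\n", link_lost(&lane01, 2));
        return 0;
}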
static void initialize_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,
@@ -1372,6 +1410,9 @@ static void print_status_message(
case LINK_TRAINING_LQA_FAIL:
lt_result = "LQA failed";
break;
+ case LINK_TRAINING_LINK_LOSS:
+ lt_result = "Link loss";
+ break;
default:
break;
}
@@ -1531,6 +1572,14 @@ enum link_training_result dc_link_dp_perform_link_training(
status);
}
+ /* Delay 5 ms after the Main Link outputs the idle pattern,
+ * then check DPCD 0202h.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ status = check_link_loss_status(link, &lt_settings);
+ }
+
/* 6. print status message*/
print_status_message(link, &lt_settings, status);
@@ -4290,22 +4339,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
- uint8_t dspc = 0;
- enum dc_status ret;
-
- ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
- sizeof(dspc));
-
- if (ret != DC_OK) {
- DC_LOG_ERROR("Error in DP aux read transaction,"
- " not writing source specific data\n");
- return;
- }
-
- /* Return if OUI unsupported */
- if (!(dspc & DP_OUI_SUPPORT))
- return;
-
if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature;
amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
@@ -4376,9 +4409,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
- sizeof(union dpcd_source_backlight_get)))
+ sizeof(union dpcd_source_backlight_get)) != DC_OK)
return false;
*backlight_millinits_avg =
@@ -4417,9 +4450,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_milli
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *) backlight_millinits,
- sizeof(uint32_t)))
+ sizeof(uint32_t)) != DC_OK)
return false;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 10d69ada88e3..0257a900fe2b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -246,20 +246,18 @@ struct dc_stream_status *dc_stream_get_status(
#ifndef TRIM_FSFT
/**
- * dc_optimize_timing() - dc to optimize timing
+ * dc_optimize_timing_for_fsft() - dc to optimize timing
*/
-bool dc_optimize_timing(
- struct dc_crtc_timing *timing,
+bool dc_optimize_timing_for_fsft(
+ struct dc_stream_state *pStream,
unsigned int max_input_rate_in_khz)
{
- //optimization is expected to assing a value to these:
- //timing->pix_clk_100hz
- //timing->v_front_porch
- //timing->v_total
- //timing->fast_transport_output_rate_100hz;
- timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+ struct dc *dc;
- return true;
+ dc = pStream->ctx->dc;
+
+ return (dc->hwss.optimize_timing_for_fsft &&
+ dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz));
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 845a3054f21f..0811f941f430 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -133,6 +133,13 @@ struct dc_vbios_funcs {
uint16_t (*pack_data_tables)(
struct dc_bios *dcb,
void *dst);
+
+ enum bp_result (*get_atom_dc_golden_table)(
+ struct dc_bios *dcb);
+
+ enum bp_result (*enable_lvtma_control)(
+ struct dc_bios *bios,
+ uint8_t uc_pwr_on);
};
struct bios_registers {
@@ -154,6 +161,7 @@ struct dc_bios {
struct dc_firmware_info fw_info;
bool fw_info_valid;
struct dc_vram_info vram_info;
+ struct dc_golden_table golden_table;
};
#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e4e85a159462..d9888f316da6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -233,7 +233,7 @@ struct dc_stream_state {
union stream_update_flags update_flags;
};
-#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+#define ABM_LEVEL_IMMEDIATE_DISABLE 255
struct dc_stream_update {
struct dc_stream_state *stream;
@@ -424,8 +424,8 @@ struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);
#ifndef TRIM_FSFT
-bool dc_optimize_timing(
- struct dc_crtc_timing *timing,
+bool dc_optimize_timing_for_fsft(
+ struct dc_stream_state *pStream,
unsigned int max_input_rate_in_khz);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 29fe5389f973..946ba929c6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -890,6 +890,20 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+struct dc_golden_table {
+ uint16_t dc_golden_table_ver;
+ uint32_t aux_dphy_rx_control0_val;
+ uint32_t aux_dphy_tx_control_val;
+ uint32_t aux_dphy_rx_control1_val;
+ uint32_t dc_gpio_aux_ctrl_0_val;
+ uint32_t dc_gpio_aux_ctrl_1_val;
+ uint32_t dc_gpio_aux_ctrl_2_val;
+ uint32_t dc_gpio_aux_ctrl_3_val;
+ uint32_t dc_gpio_aux_ctrl_4_val;
+ uint32_t dc_gpio_aux_ctrl_5_val;
+};
+
+
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
enum dc_gpu_mem_alloc_type {
DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 384389f0e2c3..66027d496778 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -38,7 +38,8 @@
#define AUX_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
- SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
#define HPD_REG_LIST(id)\
SRI(DC_HPD_CONTROL, HPD, id)
@@ -107,6 +108,7 @@
struct dce110_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
+ uint32_t AUX_DPHY_RX_CONTROL1;
};
struct dce110_link_enc_hpd_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
index 70ec691e14d2..99c68ca9c7e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -49,7 +49,7 @@
#define DCN_PANEL_CNTL_REG_LIST()\
DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
- DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 82e67bd81f2d..5167d6b8a48d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -233,8 +233,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
true : false;
+ copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
- copy_settings_data->debug.bitfields.use_hw_lock_mgr = 0;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 49380ed3aeae..45c9e9027886 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -842,6 +842,17 @@ void dce110_edp_power_control(
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
+
+ if (ctx->dc->ctx->dmub_srv &&
+ ctx->dc->debug.dmub_command_table) {
+ if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_ON);
+ else
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_OFF);
+ }
+
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
if (!power_up)
@@ -919,8 +930,21 @@ void dce110_edp_backlight_control(
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
+
+ if (ctx->dc->ctx->dmub_srv &&
+ ctx->dc->debug.dmub_command_table) {
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLON);
+ else
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLOFF);
+ }
+
link_transmitter_control(ctx->dc_bios, &cntl);
+
+
if (enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_POST_T7_DELAY);
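The two hunks above add a VBIOS LVTMA command-table call next to the existing transmitter control for eDP panel power and backlight whenever a DMUB command table is in use. The standalone sketch below illustrates that routing; the enum values, struct and function names are hypothetical stand-ins rather than the DC API, and the NULL check on the callback is added here purely for illustration.

#include <stdbool.h>
#include <stdio.h>

enum lvtma_action {
        LVTMA_POWER_ON,
        LVTMA_POWER_OFF,
        LVTMA_BLON,
        LVTMA_BLOFF,
};

struct edp_ctx {
        bool has_dmub;
        bool dmub_command_table;
        int (*enable_lvtma_control)(enum lvtma_action action); /* may be NULL */
};

static int lvtma_stub(enum lvtma_action action)
{
        printf("VBIOS LVTMA control: action %d\n", action);
        return 0;
}

static void edp_power_control(struct edp_ctx *ctx, bool power_up)
{
        /* Prefer the VBIOS/DMUB command-table path when it is wired up. */
        if (ctx->has_dmub && ctx->dmub_command_table && ctx->enable_lvtma_control)
                ctx->enable_lvtma_control(power_up ? LVTMA_POWER_ON
                                                   : LVTMA_POWER_OFF);

        /* The legacy path is still executed, as in the patched
         * dce110_edp_power_control(). */
        printf("transmitter control: power %s\n", power_up ? "on" : "off");
}

int main(void)
{
        struct edp_ctx ctx = {
                .has_dmub = true,
                .dmub_command_table = true,
                .enable_lvtma_control = lvtma_stub,
        };

        edp_power_control(&ctx, true);
        edp_power_control(&ctx, false);
        return 0;
}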
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index da0897fe3b54..fa643ec5a876 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -390,6 +390,8 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
+ // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
+ // TODO: Update golden log header to reflect this name change
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
for (i = 0; i < pool->res_cap->num_dsc; i++) {
struct display_stream_compressor *dsc = pool->dscs[i];
@@ -400,7 +402,7 @@ void dcn10_log_hw_state(struct dc *dc,
dsc->inst,
s.dsc_clock_en,
s.dsc_slice_width,
- s.dsc_bytes_per_pixel);
+ s.dsc_bits_per_pixel);
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -1448,33 +1450,42 @@ void dcn10_init_hw(struct dc *dc)
void dcn10_power_down_on_boot(struct dc *dc)
{
int i = 0;
+ struct dc_link *edp_link;
- if (dc->config.power_down_display_on_boot) {
- struct dc_link *edp_link = get_edp_link(dc);
-
- if (edp_link &&
- edp_link->link_enc->funcs->is_dig_enabled &&
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
- dc->hwss.edp_power_control) {
- dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
- dc->hwss.edp_power_control(edp_link, false);
- } else {
- for (i = 0; i < dc->link_count; i++) {
- struct dc_link *link = dc->links[i];
-
- if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
- break;
- }
+ if (!dc->config.power_down_display_on_boot)
+ return;
+
+ edp_link = get_edp_link(dc);
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwseq->funcs.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
}
+
}
}
+
+ /*
+ * Ask the clock manager to enter its low-power state so that
+ * DISPLAY_OFF is sent; otherwise DISPLAY_OFF may not be asserted.
+ */
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
void dcn10_reset_hw_ctx_wrap(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index cf59ab0034dc..04dabed5f1c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -31,10 +31,10 @@
#define TO_DCN10_LINK_ENC(link_encoder)\
container_of(link_encoder, struct dcn10_link_encoder, base)
-
#define AUX_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
- SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
#define HPD_REG_LIST(id)\
SRI(DC_HPD_CONTROL, HPD, id)
@@ -73,6 +73,7 @@ struct dcn10_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
uint32_t AUX_DPHY_TX_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
};
struct dcn10_link_enc_hpd_registers {
@@ -443,7 +444,10 @@ struct dcn10_link_enc_registers {
type AUX_TX_PRECHARGE_LEN; \
type AUX_TX_PRECHARGE_SYMBOLS; \
type AUX_MODE_DET_CHECK_DELAY;\
- type DPCS_DBG_CBUS_DIS
+ type DPCS_DBG_CBUS_DIS;\
+ type AUX_RX_PRECHARGE_SKIP;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 17d5cb422025..8939541ad7af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1213,6 +1213,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
bool video_large = false;
bool desktop_large = false;
bool dcc_disabled = false;
+ bool mpo_enabled = false;
for (i = 0; i < context->stream_count; i++) {
if (context->stream_status[i].plane_count == 0)
@@ -1221,6 +1222,9 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
if (context->stream_status[i].plane_count > 2)
return DC_FAIL_UNSUPPORTED_1;
+ if (context->stream_status[i].plane_count > 1)
+ mpo_enabled = true;
+
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
context->stream_status[i].plane_states[j];
@@ -1244,6 +1248,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
}
}
+ /* Disable MPO in multi-display configurations. */
+ if (context->stream_count > 1 && mpo_enabled)
+ return DC_FAIL_UNSUPPORTED_1;
+
/*
* Workaround: On DCN10 there is UMC issue that causes underflow when
* playing 4k video on 4k desktop with video downscaled and single channel
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 07b2f9399671..842abb4c475b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
switch (packet_index) {
case 0:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC0_FRAME_UPDATE, 1);
+ AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
break;
case 1:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC1_FRAME_UPDATE, 1);
+ AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
break;
case 2:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC2_FRAME_UPDATE, 1);
+ AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
break;
case 3:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC3_FRAME_UPDATE, 1);
+ AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
break;
case 4:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC4_FRAME_UPDATE, 1);
+ AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
break;
case 5:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC5_FRAME_UPDATE, 1);
+ AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
break;
case 6:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC6_FRAME_UPDATE, 1);
+ AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
break;
case 7:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC7_FRAME_UPDATE, 1);
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index ed385b1477be..30eae7459d50 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
@@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers {
type AFMT_GENERIC2_FRAME_UPDATE;\
type AFMT_GENERIC3_FRAME_UPDATE;\
type AFMT_GENERIC4_FRAME_UPDATE;\
+ type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_FRAME_UPDATE;\
type AFMT_GENERIC6_FRAME_UPDATE;\
type AFMT_GENERIC7_FRAME_UPDATE;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index ba50214d6c32..79b640e202eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -156,7 +156,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
- REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
+ REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 7725a406c16e..c8cfd3ba1c15 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1457,8 +1457,8 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
- pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
- && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -2498,3 +2498,30 @@ void dcn20_fpga_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
}
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz)
+{
+ unsigned int old_v_front_porch;
+ unsigned int old_v_total;
+ unsigned int max_input_rate_in_100hz;
+ unsigned long long new_v_total;
+
+ max_input_rate_in_100hz = max_input_rate_in_khz * 10;
+ if (max_input_rate_in_100hz < timing->pix_clk_100hz)
+ return false;
+
+ old_v_total = timing->v_total;
+ old_v_front_porch = timing->v_front_porch;
+
+ timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+ timing->pix_clk_100hz = max_input_rate_in_100hz;
+
+ new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);
+
+ timing->v_total = new_v_total;
+ timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
+ return true;
+}
+#endif
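To see the intent of dcn20_optimize_timing_for_fsft() above, here is a standalone arithmetic sketch (outside the patch): the pixel clock is raised to the sink's maximum input rate and the vertical total is stretched, with the extra lines absorbed by the front porch, so that the refresh rate stays unchanged. The relationship assumed here is new_v_total = old_v_total * new_pix_clk / old_pix_clk, and all numbers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t h_total = 2200, old_v_total = 1125, old_v_fp = 4;
        uint64_t old_clk_100hz = 1485000;          /* 148.5 MHz, 1080p60       */
        uint64_t new_clk_100hz = 2970000;          /* sink max input, 297 MHz  */

        uint64_t new_v_total = old_v_total * new_clk_100hz / old_clk_100hz;
        uint64_t new_v_fp = old_v_fp + (new_v_total - old_v_total);

        /* Refresh rate in mHz: pix_clk[100Hz] * 100000 / (h_total * v_total). */
        printf("old refresh: %llu mHz\n",
               (unsigned long long)(old_clk_100hz * 100000 / (h_total * old_v_total)));
        printf("new refresh: %llu mHz\n",
               (unsigned long long)(new_clk_100hz * 100000 / (h_total * new_v_total)));
        printf("v_total %llu -> %llu, v_front_porch %llu -> %llu\n",
               (unsigned long long)old_v_total, (unsigned long long)new_v_total,
               (unsigned long long)old_v_fp, (unsigned long long)new_v_fp);
        return 0;
}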
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 63ce763f148e..83220e34c1a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -132,5 +132,10 @@ int dcn20_init_sys_ctx(struct dce_hwseq *hws,
struct dc *dc,
struct dc_phy_addr_space_config *pa_config);
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz);
+#endif
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 2380392b916e..3dde6f26de47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -88,6 +88,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+#ifndef TRIM_FSFT
+ .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 8d209dae66e6..15c2ff264ff6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -309,7 +309,6 @@ bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
void enc2_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-
/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
@@ -333,9 +332,18 @@ void enc2_hw_init(struct link_encoder *enc)
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
- AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+ if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
+
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
+ } else {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ }
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index db09f40075c2..dcbf28dd72d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -167,7 +167,9 @@
LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
- LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -191,7 +193,10 @@
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
- LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh)
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
#define UNIPHY_DCN2_REG_LIST(id) \
SRI(CLOCK_ENABLE, SYMCLK, id), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 968a89bbcf24..9140b3fc767a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2223,7 +2223,7 @@ int dcn20_populate_dml_pipes_from_context(
if (!res_ctx->pipe_ctx[i].plane_state) {
pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
- pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
+ pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
@@ -2235,7 +2235,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
- pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
+ pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
@@ -3069,8 +3069,7 @@ void dcn20_calculate_dlg_params(
int pipe_cnt,
int vlevel)
{
- int i, j, pipe_idx, pipe_idx_unsplit;
- bool visited[MAX_PIPES] = { 0 };
+ int i, pipe_idx;
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3089,55 +3088,17 @@ void dcn20_calculate_dlg_params(
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
- /*
- * An artifact of dml pipe split/odm is that pipes get merged back together for
- * calculation. Therefore we need to only extract for first pipe in ascending index order
- * and copy into the other split half.
- */
- for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- if (!visited[pipe_idx]) {
- display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
-
- dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
- dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
- dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
- dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
- /*
- * j iterates inside pipes array, unlike i which iterates inside
- * pipe_ctx array
- */
- if (src->is_hsplit)
- for (j = pipe_idx + 1; j < pipe_cnt; j++) {
- display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
- display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;
-
- if (src_j->is_hsplit && !visited[j]
- && src->hsplit_grp == src_j->hsplit_grp) {
- dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
- dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
- dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
- dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
- visited[j] = true;
- }
- }
- visited[pipe_idx] = true;
- pipe_idx_unsplit++;
- }
- pipe_idx++;
- }
-
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- ASSERT(visited[pipe_idx]);
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
@@ -3180,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 177d0dc8927a..b187f71afa65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -92,6 +92,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+#ifndef TRIM_FSFT
+ .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index c29326e9856a..2ae159e2dd6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -62,7 +62,7 @@ static const struct link_encoder_funcs dcn30_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn30_link_encoder_validate_output_with_stream,
- .hw_init = enc2_hw_init,
+ .hw_init = enc3_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
@@ -203,3 +203,54 @@ void dcn30_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
+
+#define AUX_REG(reg)\
+ (enc10->aux_regs->reg)
+
+#define AUX_REG_READ(reg_name) \
+ dm_read_reg(CTX, AUX_REG(reg_name))
+
+#define AUX_REG_WRITE(reg_name, val) \
+ dm_write_reg(CTX, AUX_REG(reg_name), val)
+void enc3_hw_init(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+/*
+ 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
+ 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
+ 02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
+ 03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
+ 04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
+ 05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
+ 06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
+ 07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
+*/
+
+/*
+ AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
+ AUX_RX_START_WINDOW = 1 [6:4]
+ AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
+ AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
+ AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
+ AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
+ AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
+ AUX_RX_DETECTION_THRESHOLD [30:28] = 1
+*/
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+
+ //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
+ // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
+ // 27MHz -> 0xd
+ // 100MHz -> 0x32
+ // 48MHz -> 0x18
+
+ // Set TMDS_CTL0 to 1. This is a legacy setting.
+ REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
+
+ dcn10_aux_initialize(enc10);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index 585d1ce63db1..2fbf879cd327 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -61,7 +61,10 @@
DPCS_DCN2_MASK_SH_LIST(mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
- LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
+
void dcn30_link_encoder_construct(
struct dcn20_link_encoder *enc20,
@@ -73,4 +76,6 @@ void dcn30_link_encoder_construct(
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask);
+void enc3_hw_init(struct link_encoder *enc);
+
#endif /* __DC_LINK_ENCODER__DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 1b354c219d0a..9afee7160490 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -26,6 +26,7 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "dcn30_hwseq.h"
static const struct hw_sequencer_funcs dcn30_funcs = {
@@ -87,8 +88,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
- .set_backlight_level = dce110_set_backlight_level,
- .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 653a571e366d..ebe0cc5b833b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -491,6 +491,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dce110_aux_registers_shift aux_shift = {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index b54814f11b74..2beb284f89b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -63,6 +63,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3984,6 +3985,9 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 7916a7ea9336..afdd4f0d9d71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -154,23 +154,11 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimeP
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
+dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
-unsigned int get_vstartup_calculated(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes,
- unsigned int which_pipe)
-{
- unsigned int which_plane;
-
- recalculate_params(mode_lib, pipes, num_pipes);
- which_plane = mode_lib->vba.pipe_plane[which_pipe];
- return mode_lib->vba.VStartup[which_plane];
-}
-
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
@@ -479,7 +467,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
- mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+ mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+ mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_slices;
mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
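For reference, the per-pipe getter that dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup) is expected to generate has the same shape as the deleted open-coded helper; a minimal sketch, assuming the macro mirrors get_vstartup_calculated():

/* Illustrative sketch only, not the actual macro expansion. */
unsigned int get_vstartup(struct display_mode_lib *mode_lib,
			  const display_e2e_pipe_params_st *pipes,
			  unsigned int num_pipes,
			  unsigned int which_pipe)
{
	unsigned int which_plane;

	/* re-run DML so vba.VStartup[] reflects the given pipe configuration */
	recalculate_params(mode_lib, pipes, num_pipes);
	which_plane = mode_lib->vba.pipe_plane[which_pipe];
	return mode_lib->vba.VStartup[which_plane];
}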
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 756d8eb1221c..21e5111ea7a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -98,16 +98,11 @@ dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us);
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us);
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us);
+dml_get_pipe_attr_decl(vstartup);
dml_get_pipe_attr_decl(vupdate_offset);
dml_get_pipe_attr_decl(vupdate_width);
dml_get_pipe_attr_decl(vready_offset);
-unsigned int get_vstartup_calculated(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes,
- unsigned int which_pipe);
-
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5994d2a33c40..947d6106f341 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -230,6 +230,8 @@ struct clk_mgr_funcs {
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+ void (*set_low_power_state)(struct clk_mgr *clk_mgr);
+
void (*init_clocks)(struct clk_mgr *clk_mgr);
void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 4e6e18bbef5d..72743058836d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -71,8 +71,9 @@ enum dentist_divider_range {
#define CTX \
clk_mgr->base.ctx
+
#define DC_LOGGER \
- clk_mgr->ctx->logger
+ clk_mgr->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 5915994f9eb8..f520e13aee4c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -55,7 +55,7 @@ struct dsc_optc_config {
struct dcn_dsc_state {
uint32_t dsc_clock_en;
uint32_t dsc_slice_width;
- uint32_t dsc_bytes_per_pixel;
+ uint32_t dsc_bits_per_pixel;
uint32_t dsc_slice_height;
uint32_t dsc_pic_width;
uint32_t dsc_pic_height;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 720ce5e458d8..3c986717dcd5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -116,6 +116,11 @@ struct hw_sequencer_funcs {
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
int num_pipes,
const struct dc_static_screen_params *events);
+#ifndef TRIM_FSFT
+ bool (*optimize_timing_for_fsft)(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz);
+#endif
/* Stream Related */
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index c30437ae8395..21011edea337 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -101,6 +101,13 @@ enum bp_pipe_control_action {
ASIC_PIPE_INIT
};
+enum bp_lvtma_control_action {
+ LVTMA_CONTROL_LCD_BLOFF = 2,
+ LVTMA_CONTROL_LCD_BLON = 3,
+ LVTMA_CONTROL_POWER_ON = 12,
+ LVTMA_CONTROL_POWER_OFF = 13
+};
+
struct bp_encoder_control {
enum bp_encoder_control_action action;
enum engine_id engine_id;
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 89ef9f6860e5..16df2a485dd0 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
*/
static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
+
return dc_fixpt_exp(
dc_fixpt_mul(
dc_fixpt_log(arg1),
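dc_fixpt_pow() is evaluated as exp(arg2 * log(arg1)), so a zero base would feed log(0); the added guard short-circuits that case. A plain-arithmetic model of the convention, assuming the usual 0^0 == 1 choice (illustrative only, not driver code):

#include <math.h>

/* 0^0 -> 1, 0^y -> 0 for y != 0, otherwise exp(y * log(x)),
 * which is what the fixed-point path does with dc_fixpt_exp()/dc_fixpt_log(). */
static double fixpt_pow_model(double x, double y)
{
	if (x == 0.0)
		return (y == 0.0) ? 1.0 : 0.0;
	return exp(y * log(x));
}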
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 4869d4562e4d..550f46e9b95f 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -66,6 +66,8 @@ enum link_training_result {
/* other failure during EQ step */
LINK_TRAINING_EQ_FAIL_EQ,
LINK_TRAINING_LQA_FAIL,
+ /* one of the CR, EQ or symbol locks is dropped */
+ LINK_TRAINING_LINK_LOSS,
};
struct link_training_settings {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 7a2500fbf3f2..d988533d4af5 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
+ * - Delta for CEIL: delta_from_mid_point_in_us_1
+ * - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
- (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
- mid_point_frames_floor < 2)) {
+ if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ /* Check for out of range.
+ * If using CEIL produces a value that is out of range,
+ * then we are forced to use FLOOR.
+ */
+ frames_to_insert = mid_point_frames_floor;
+ } else if (mid_point_frames_floor < 2) {
+ /* Check if FLOOR would result in non-LFC. In this case
+ * choose to use CEIL
+ */
+ frames_to_insert = mid_point_frames_ceil;
+ } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ /* If choosing CEIL results in a frame duration
+ * closer to the mid point of the range,
+ * choose CEIL.
+ */
frames_to_insert = mid_point_frames_ceil;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
- delta_from_mid_point_in_us_1;
} else {
+ /* If choosing FLOOR results in a frame duration
+ * closer to the mid point of the range,
+ * choose FLOOR.
+ */
frames_to_insert = mid_point_frames_floor;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
- delta_from_mid_point_in_us_2;
}
/* Prefer current frame multiplier when BTR is enabled unless it drifts
* too far from the midpoint
*/
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
@@ -829,10 +851,13 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
switch (packet_type) {
case PACKET_TYPE_FS_V3:
#ifndef TRIM_FSFT
+ // always populate a rate: fast transport rate when enabled, otherwise pixel rate.
build_vrr_infopacket_v3(
stream->signal, vrr,
stream->timing.flags.FAST_TRANSPORT,
- stream->timing.fast_transport_output_rate_100hz,
+ (stream->timing.flags.FAST_TRANSPORT) ?
+ stream->timing.fast_transport_output_rate_100hz :
+ stream->timing.pix_clk_100hz,
app_tf, infopacket);
#else
build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
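The rewritten BTR branch ordering above can be read as three independent checks; a condensed sketch of that decision (parameter names are illustrative and taken from the surrounding code, with mid_point_frames_ceil assumed >= 1) follows:

static unsigned int pick_frames_to_insert(unsigned int last_render_time_in_us,
					  unsigned int min_duration_in_us,
					  unsigned int mid_point_frames_ceil,
					  unsigned int mid_point_frames_floor,
					  unsigned int delta_ceil_us,  /* |mid - render/ceil|  */
					  unsigned int delta_floor_us) /* |mid - render/floor| */
{
	if (last_render_time_in_us / mid_point_frames_ceil < min_duration_in_us)
		return mid_point_frames_floor;	/* CEIL would fall below the minimum duration */
	if (mid_point_frames_floor < 2)
		return mid_point_frames_ceil;	/* FLOOR would drop out of LFC */
	return (delta_ceil_us < delta_floor_us) ? mid_point_frames_ceil
						: mid_point_frames_floor;
}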
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index c2544c81dfb2..3e526c394f6c 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -941,7 +941,6 @@ struct atom_display_controller_info_v4_1
uint8_t reserved3[8];
};
-
struct atom_display_controller_info_v4_2
{
struct atom_common_table_header table_header;
@@ -976,6 +975,59 @@ struct atom_display_controller_info_v4_2
uint8_t reserved3[8];
};
+struct atom_display_controller_info_v4_4 {
+ struct atom_common_table_header table_header;
+ uint32_t display_caps;
+ uint32_t bootup_dispclk_10khz;
+ uint16_t dce_refclk_10khz;
+ uint16_t i2c_engine_refclk_10khz;
+ uint16_t dvi_ss_percentage; // in unit of 0.001%
+ uint16_t dvi_ss_rate_10hz;
+ uint16_t hdmi_ss_percentage; // in unit of 0.001%
+ uint16_t hdmi_ss_rate_10hz;
+ uint16_t dp_ss_percentage; // in unit of 0.001%
+ uint16_t dp_ss_rate_10hz;
+ uint8_t dvi_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t hdmi_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t dp_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t ss_reserved;
+ uint8_t dfp_hardcode_mode_num; // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refresh rate defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t vga_hardcode_mode_num; // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t vga_hardcode_refreshrate;// VGA hardcode mode refresh rate defined in StandardVESA_TimingTable when EDID is not available
+ uint16_t dpphy_refclk_10khz;
+ uint16_t hw_chip_id;
+ uint8_t dcnip_min_ver;
+ uint8_t dcnip_max_ver;
+ uint8_t max_disp_pipe_num;
+ uint8_t max_vbios_active_disp_pipum;
+ uint8_t max_ppll_num;
+ uint8_t max_disp_phy_num;
+ uint8_t max_aux_pairs;
+ uint8_t remotedisplayconfig;
+ uint32_t dispclk_pll_vco_freq;
+ uint32_t dp_ref_clk_freq;
+ uint32_t max_mclk_chg_lat; // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of ns (0.1 us)
+ uint32_t max_sr_exit_lat; // Worst case memory self refresh exit time, units of 100s of ns (0.1 us)
+ uint32_t max_sr_enter_exit_lat; // Worst case memory self refresh entry followed by immediate exit time, units of 100s of ns (0.1 us)
+ uint16_t dc_golden_table_offset; // offset to struct atom_dc_golden_table_vxx
+ uint16_t dc_golden_table_ver;
+ uint32_t reserved3[3];
+};
+
+struct atom_dc_golden_table_v1
+{
+ uint32_t aux_dphy_rx_control0_val;
+ uint32_t aux_dphy_tx_control_val;
+ uint32_t aux_dphy_rx_control1_val;
+ uint32_t dc_gpio_aux_ctrl_0_val;
+ uint32_t dc_gpio_aux_ctrl_1_val;
+ uint32_t dc_gpio_aux_ctrl_2_val;
+ uint32_t dc_gpio_aux_ctrl_3_val;
+ uint32_t dc_gpio_aux_ctrl_4_val;
+ uint32_t dc_gpio_aux_ctrl_5_val;
+ uint32_t reserved[23];
+};
enum dce_info_caps_def
{
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 838a369c9ec3..0826625573dc 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -133,6 +133,78 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->dpm_set_vcn_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->vcn_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->vcn_gated, !enable);
+
+ return ret;
+}
+
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ mutex_lock(&power_gate->vcn_gate_lock);
+
+ ret = smu_dpm_set_vcn_enable_locked(smu, enable);
+
+ mutex_unlock(&power_gate->vcn_gate_lock);
+
+ return ret;
+}
+
+static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->dpm_set_jpeg_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->jpeg_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->jpeg_gated, !enable);
+
+ return ret;
+}
+
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ mutex_lock(&power_gate->jpeg_gate_lock);
+
+ ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
+
+ mutex_unlock(&power_gate->jpeg_gate_lock);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -353,6 +425,45 @@ static int smu_early_init(void *handle)
return smu_set_funcs(adev);
}
+static int smu_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int vcn_gate, jpeg_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_default_dpm_table)
+ return 0;
+
+ mutex_lock(&power_gate->vcn_gate_lock);
+ mutex_lock(&power_gate->jpeg_gate_lock);
+
+ vcn_gate = atomic_read(&power_gate->vcn_gated);
+ jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+
+ ret = smu_dpm_set_vcn_enable_locked(smu, true);
+ if (ret)
+ goto err0_out;
+
+ ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+ if (ret)
+ goto err1_out;
+
+ ret = smu->ppt_funcs->set_default_dpm_table(smu);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Failed to setup default dpm clock tables!\n");
+
+ smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
+err1_out:
+ smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
+err0_out:
+ mutex_unlock(&power_gate->jpeg_gate_lock);
+ mutex_unlock(&power_gate->vcn_gate_lock);
+
+ return ret;
+}
+
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -579,6 +690,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
if (ret)
return ret;
+ ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -586,6 +701,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
{
int ret;
+ smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+
ret = smu_free_memory_pool(smu);
if (ret)
return ret;
@@ -643,6 +760,11 @@ static int smu_sw_init(void *handle)
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+ mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
+ mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
+
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
@@ -734,7 +856,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0;
int ret;
- if (smu_is_dpm_running(smu) && adev->in_suspend) {
+ if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
return 0;
}
@@ -844,10 +966,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
- if (ret)
- return ret;
-
ret = smu_disable_umc_cdr_12gbps_workaround(smu);
if (ret) {
dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
@@ -1046,8 +1164,6 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- smu_i2c_fini(smu, &adev->pm.smu_i2c);
-
cancel_work_sync(&smu->throttling_logging_work);
ret = smu_disable_thermal_alert(smu);
@@ -1590,6 +1706,9 @@ int smu_set_mp1_state(struct smu_context *smu,
}
ret = smu_send_smc_msg(smu, msg, NULL);
+ /* some asics may not support those messages */
+ if (ret == -EINVAL)
+ ret = 0;
if (ret)
dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
@@ -1944,6 +2063,10 @@ int smu_read_sensor(struct smu_context *smu,
mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->read_sensor)
+ if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
+ goto unlock;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
@@ -1966,7 +2089,7 @@ int smu_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
- *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
+ *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -1974,11 +2097,12 @@ int smu_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- if (smu->ppt_funcs->read_sensor)
- ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+ *size = 0;
+ ret = -EOPNOTSUPP;
break;
}
+unlock:
mutex_unlock(&smu->mutex);
return ret;
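The new *_locked helpers above skip the SMU message when the requested state already matches the cached one; the "atomic_read(&...gated) ^ enable" early-out reads as the truth table below (gated == 1 means the block is power-gated). The helper is illustrative only, not part of the patch:

/*   gated enable  gated^enable  action
 *     1     1          0        send PowerUp,   then gated = 0
 *     0     1          1        already ungated -> return 0
 *     1     0          1        already gated   -> return 0
 *     0     0          0        send PowerDown, then gated = 1
 */
static inline bool gate_request_is_noop(int gated, bool enable)
{
	return (gated ^ (int)enable) != 0;	/* mirrors the check in the *_locked helpers */
}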
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 3b9182c8c53f..fb962b9ceffb 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1849,8 +1849,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -1861,7 +1859,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
return ret;
}
}
- power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
@@ -1870,7 +1867,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
return ret;
}
}
- power_gate->vcn_gated = true;
}
return ret;
@@ -2080,22 +2076,11 @@ static const struct i2c_algorithm arcturus_i2c_algo = {
.functionality = arcturus_i2c_func,
};
-static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- return control->dev.parent == &adev->pdev->dev;
-}
-
static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;
- /* smu_i2c_eeprom_init may be called twice in sriov */
- if (arcturus_i2c_adapter_is_added(control))
- return 0;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2111,9 +2096,6 @@ static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter
static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
- if (!arcturus_i2c_adapter_is_added(control))
- return;
-
i2c_del_adapter(control);
}
@@ -2222,14 +2204,17 @@ static const struct throttling_logging_label {
};
static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
{
+ int ret;
int throttler_idx, throtting_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
- arcturus_get_smu_metrics_data(smu,
- METRICS_THROTTLER_STATUS,
- &throttler_status);
+ ret = arcturus_get_smu_metrics_data(smu,
+ METRICS_THROTTLER_STATUS,
+ &throttler_status);
+ if (ret)
+ return;
memset(log_buf, 0, sizeof(log_buf));
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index c9cfe90a2947..9ee8cf8267c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -204,8 +204,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != clock) {
+ if (clock && smu10_data->deep_sleep_dcefclk != clock) {
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -219,8 +218,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->dcf_actual_hard_min_freq &&
- smu10_data->dcf_actual_hard_min_freq != clock) {
+ if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
@@ -234,8 +232,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->f_actual_hard_min_freq &&
- smu10_data->f_actual_hard_min_freq != clock) {
+ if (clock && smu10_data->f_actual_hard_min_freq != clock) {
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index 468bdd6f6697..d572ba4ec9b1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -363,17 +363,19 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v2_information *pp_table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in units of degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > tdp_table->usSoftwareShutdownTemp)
+ high = tdp_table->usSoftwareShutdownTemp;
if (low > high)
return -EINVAL;
@@ -382,8 +384,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index c15b9756025d..7ace439dcde7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in units of degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
@@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b8839641770..ea70d736f6a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
- uint64_t features_enabled;
- int i;
- bool enabled;
- int ret = 0;
+ int i, ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
@@ -990,17 +987,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
- ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
- PP_ASSERT_WITH_CODE(!ret,
- "[DisableAllSMUFeatures] Failed to get enabled smc features!",
- return ret);
-
- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
- true : false;
- data->smu_features[i].enabled = enabled;
- data->smu_features[i].supported = enabled;
- }
+ for (i = 0; i < GNLD_FEATURES_MAX; i++)
+ data->smu_features[i].enabled = 0;
return 0;
}
@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = true;
data->vce_power_gated = true;
-
- if (data->smu_features[GNLD_DPM_UVD].enabled)
- data->uvd_power_gated = false;
-
- if (data->smu_features[GNLD_DPM_VCE].enabled)
- data->vce_power_gated = false;
}
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
@@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
- uint64_t features_enabled;
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint64_t features_enabled, features_to_enable, features_to_disable;
+ int i, ret = 0;
+ bool enabled;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
@@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
}
+ /* Update the cached feature enablement state */
+ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
+ true : false;
+ data->smu_features[i].enabled = enabled;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index 7add2f60f49c..364162ddaa9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in units of degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
@@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
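The same clamp rework is applied to vega10, vega12 and vega20 above: the limits are now compared in whole degrees and the divide is done once on range->min, assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (millidegrees Celsius). A sketch of the low-side clamp in isolation, with illustrative names:

/* e.g. range_min_millidegc = -273150 -> -273 degC, which is then compared
 * against the alert minimum; DIG_THERM_INTL takes degrees directly. */
static int clamp_low_alert_degc(int min_alert_degc, int range_min_millidegc)
{
	int range_min_degc = range_min_millidegc / 1000;

	return (min_alert_degc < range_min_degc) ? range_min_degc : min_alert_degc;
}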
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 28312d6dc187..074458eb5407 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -292,8 +292,10 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
- bool vcn_gated;
- bool jpeg_gated;
+ atomic_t vcn_gated;
+ atomic_t jpeg_gated;
+ struct mutex vcn_gate_lock;
+ struct mutex jpeg_gate_lock;
};
struct smu_power_context {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
index b2232e24d82f..aa2708fccb6d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x33
+#define SMU11_DRIVER_IF_VERSION 0x34
#define PPTABLE_Sienna_Cichlid_SMU_VERSION 5
@@ -968,9 +968,15 @@ typedef struct {
typedef struct {
uint32_t CurrClock[PPCLK_COUNT];
- uint16_t AverageGfxclkFrequency;
- uint16_t AverageFclkFrequency;
- uint16_t AverageUclkFrequency ;
+
+ uint16_t AverageGfxclkFrequencyPreDs;
+ uint16_t AverageGfxclkFrequencyPostDs;
+ uint16_t AverageFclkFrequencyPreDs;
+ uint16_t AverageFclkFrequencyPostDs;
+ uint16_t AverageUclkFrequencyPreDs ;
+ uint16_t AverageUclkFrequencyPostDs ;
+
+
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
uint8_t CurrSocVoltageOffset ;
@@ -988,6 +994,7 @@ typedef struct {
uint16_t TemperatureLiquid0 ;
uint16_t TemperatureLiquid1 ;
uint16_t TemperaturePlx ;
+ uint16_t Padding16 ;
uint32_t ThrottlerStatus ;
uint8_t LinkDpmLevel;
@@ -1006,8 +1013,10 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
- uint16_t padding16_2;
+ uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
+ uint8_t PcieRate ;
+ uint8_t PcieWidth ;
+
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 429f5aa8924a..6a42331aba8a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -30,8 +30,8 @@
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
/* MP Apertures */
#define MP0_Public 0x03800000
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 6aaf483858a0..9f62af9abd23 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -785,8 +785,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -796,14 +794,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (ret)
return ret;
}
- power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
- power_gate->vcn_gated = true;
}
return ret;
@@ -811,8 +807,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -821,14 +815,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (ret)
return ret;
}
- power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
- power_gate->jpeg_gated = true;
}
return ret;
@@ -2457,22 +2449,11 @@ static const struct i2c_algorithm navi10_i2c_algo = {
.functionality = navi10_i2c_func,
};
-static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- return control->dev.parent == &adev->pdev->dev;
-}
-
static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;
- /* smu_i2c_eeprom_init may be called twice in sriov */
- if (navi10_i2c_adapter_is_added(control))
- return 0;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2488,9 +2469,6 @@ static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *
static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
- if (!navi10_i2c_adapter_is_added(control))
- return;
-
i2c_del_adapter(control);
}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 575ae4be98a2..dbb676c482fd 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -459,8 +459,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -470,14 +468,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (ret)
return ret;
}
- power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
- power_gate->vcn_gated = true;
}
return ret;
@@ -485,8 +481,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -495,14 +489,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (ret)
return ret;
}
- power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
- power_gate->jpeg_gated = true;
}
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 59da3ca2a4ca..638760839c15 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -70,14 +70,16 @@
FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
+#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
+
static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 1),
- MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 1),
- MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
- MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
@@ -85,42 +87,44 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
- MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
- MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
- MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 1),
- MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 1),
- MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
- MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
- MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 1),
- MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 1),
- MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
- MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 1),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
- MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 1),
- MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 1),
- MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 1),
- MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 1),
- MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 1),
- MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 1),
- MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 1),
- MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1),
+ MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0),
+ MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 0),
+ MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0),
+ MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0),
+ MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
- MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
- MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
- MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 1),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
- MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 1),
- MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 1),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
+ MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -442,13 +446,16 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->CurrClock[PPCLK_DCEFCLK];
break;
case METRICS_AVERAGE_GFXCLK:
- *value = metrics->AverageGfxclkFrequency;
+ if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+ *value = metrics->AverageGfxclkFrequencyPostDs;
+ else
+ *value = metrics->AverageGfxclkFrequencyPreDs;
break;
case METRICS_AVERAGE_FCLK:
- *value = metrics->AverageFclkFrequency;
+ *value = metrics->AverageFclkFrequencyPostDs;
break;
case METRICS_AVERAGE_UCLK:
- *value = metrics->AverageUclkFrequency;
+ *value = metrics->AverageUclkFrequencyPostDs;
break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = metrics->AverageGfxActivity;
@@ -760,10 +767,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
struct amdgpu_device *adev = smu->adev;
-
int ret = 0;
if (enable) {
@@ -772,27 +776,25 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->vcn.num_vcn_inst > 1) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
0x10000, NULL);
if (ret)
return ret;
}
}
- power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
if (ret)
return ret;
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->vcn.num_vcn_inst > 1) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
0x10000, NULL);
if (ret)
return ret;
}
}
- power_gate->vcn_gated = true;
}
return ret;
@@ -800,8 +802,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
if (enable) {
@@ -810,14 +810,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
if (ret)
return ret;
}
- power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
- power_gate->jpeg_gated = true;
}
return ret;
@@ -1735,6 +1733,11 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
return ret;
}
+static int sienna_cichlid_run_btc(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+}
+
static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2624,22 +2627,11 @@ static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
.functionality = sienna_cichlid_i2c_func,
};
-static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- return control->dev.parent == &adev->pdev->dev;
-}
-
static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;
- /* smu_i2c_eeprom_init may be called twice in sriov */
- if (sienna_cichlid_i2c_adapter_is_added(control))
- return 0;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2655,9 +2647,6 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_a
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
- if (!sienna_cichlid_i2c_adapter_is_added(control))
- return;
-
i2c_del_adapter(control);
}
@@ -2736,6 +2725,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .run_btc = sienna_cichlid_run_btc,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
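The sienna_cichlid hunk above adds a one-line helper and then exposes it by filling a new slot in the ASIC's function-pointer table. A minimal standalone sketch of that ops-table pattern, using hypothetical struct and function names rather than the real pptable_funcs layout:

struct smu;

struct smu_funcs {
	int (*run_btc)(struct smu *smu);     /* new optional hook */
	int (*mode1_reset)(struct smu *smu);
};

/* Hypothetical per-ASIC implementation of the new hook. */
static int my_asic_run_btc(struct smu *smu)
{
	(void)smu;
	return 0;                            /* would send the RunDcBtc message */
}

static int my_asic_mode1_reset(struct smu *smu)
{
	(void)smu;
	return 0;
}

static const struct smu_funcs my_asic_funcs = {
	.run_btc     = my_asic_run_btc,      /* callers check for a NULL slot */
	.mode1_reset = my_asic_mode1_reset,
};

Because callers dispatch through the table and tolerate empty slots, ASICs that never fill .run_btc keep working unchanged.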
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/powerplay/smu_cmn.c
index be4b678d0e60..5c23c44c33bd 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_cmn.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_cmn.c
@@ -166,7 +166,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
switch (type) {
case CMN2ASIC_MAPPING_MSG:
- if (index > SMU_MSG_MAX_COUNT ||
+ if (index >= SMU_MSG_MAX_COUNT ||
!smu->message_map)
return -EINVAL;
@@ -181,7 +181,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return msg_mapping.map_to;
case CMN2ASIC_MAPPING_CLK:
- if (index > SMU_CLK_COUNT ||
+ if (index >= SMU_CLK_COUNT ||
!smu->clock_map)
return -EINVAL;
@@ -192,7 +192,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_FEATURE:
- if (index > SMU_FEATURE_COUNT ||
+ if (index >= SMU_FEATURE_COUNT ||
!smu->feature_map)
return -EINVAL;
@@ -203,7 +203,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_TABLE:
- if (index > SMU_TABLE_COUNT ||
+ if (index >= SMU_TABLE_COUNT ||
!smu->table_map)
return -EINVAL;
@@ -214,7 +214,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_PWR:
- if (index > SMU_POWER_SOURCE_COUNT ||
+ if (index >= SMU_POWER_SOURCE_COUNT ||
!smu->pwr_src_map)
return -EINVAL;
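The five smu_cmn.c hunks above are all the same off-by-one fix: for a zero-based table of COUNT entries, an index equal to COUNT is already out of range, so the guard has to use ">=" rather than ">". A minimal sketch with a made-up table (the -1 stands in for -EINVAL):

#include <stddef.h>

#define MSG_MAX_COUNT 4                 /* hypothetical table size */

static const int msg_map[MSG_MAX_COUNT] = { 10, 11, 12, 13 };

static int map_msg(size_t index)
{
	/* "index > MSG_MAX_COUNT" would let index == MSG_MAX_COUNT through
	 * and read one element past the end of msg_map[]. */
	if (index >= MSG_MAX_COUNT)
		return -1;
	return msg_map[index];
}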
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index d0deaefd3feb..264073d4e263 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -60,7 +60,6 @@
#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
-#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu)
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)
@@ -77,8 +76,6 @@
#define smu_get_dal_power_level(smu, clocks) smu_ppt_funcs(get_dal_power_level, 0, smu, clocks)
#define smu_get_perf_level(smu, designation, level) smu_ppt_funcs(get_perf_level, 0, smu, designation, level)
#define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
-#define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
-#define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
#define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index fd82402065e6..7b950a582a28 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1029,6 +1029,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 02159ca29fa2..c18169aa59ce 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return ci_is_smc_ram_running(hwmgr);
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS,
+ VOLTAGE_CONTROLLER_ON))
+ ? true : false;
}
static int ci_smu_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index ca570b135478..e9de542f9b7c 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -532,7 +532,7 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
- /* fall through */
+ fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
@@ -869,7 +869,7 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
- /* fall through */
+ fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
@@ -1324,7 +1324,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
break;
case MW_RESTART:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
- /* fall through - to a new start */
+ fallthrough; /* to a new start */
case MW_START:
/* writeback started, need to emulate one-shot mode */
hw->disable_memwrite(hwdev);
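Many hunks in this series convert "/* fall through */" comments into the kernel's fallthrough pseudo-keyword, so -Wimplicit-fallthrough can tell a deliberate fall-through from a forgotten break. A standalone sketch of the idea; in the kernel the macro comes from <linux/compiler_attributes.h>, and the local definition below exists only so the example builds on its own:

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)    /* fallback for older compilers */
#endif

static unsigned int channels_to_lane_mask(int channels)
{
	unsigned int mask = 0;

	switch (channels) {
	case 8:
		mask |= 1u << 3;
		fallthrough;            /* each extra pair enables one more lane */
	case 6:
		mask |= 1u << 2;
		fallthrough;
	case 4:
		mask |= 1u << 1;
		break;
	default:
		break;
	}
	return mask;
}

The conversion is mechanical: every comment that marked an intentional fall-through becomes the statement-like macro, and the compiler then warns whenever control falls into a new case without the macro (or a break/return) in front of it.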
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dd12b55d57a2..6a9fba051d13 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -238,7 +238,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->dp501_fw_addr = NULL;
}
}
- /* fallthrough */
+ fallthrough;
case 0x0c:
ast->tx_chip_type = AST_TX_DP501;
}
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index ce94f797d090..66b67402f1ac 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -409,7 +409,6 @@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
switch (data_type) {
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
- fallthrough;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
if (xfer->msg->rx_len > 1) {
/* read second byte */
@@ -418,7 +417,6 @@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
}
fallthrough;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
- fallthrough;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
if (xfer->msg->rx_len > 0) {
/* read first byte */
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index d7e65c869415..9fef6413741d 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -61,10 +61,10 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
switch (hparms->channels) {
case 7 ... 8:
conf0 |= HDMI_AUD_CONF0_I2S_EN3;
- /* Fall-thru */
+ fallthrough;
case 5 ... 6:
conf0 |= HDMI_AUD_CONF0_I2S_EN2;
- /* Fall-thru */
+ fallthrough;
case 3 ... 4:
conf0 |= HDMI_AUD_CONF0_I2S_EN1;
/* Fall-thru */
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 86b9f0f87a14..5b6e19ecbc84 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -604,13 +604,13 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
DRM_DEV_ERROR(pdata->dev,
"Unexpected max rate (%#x); assuming 5.4 GHz\n",
(int)dpcd_val);
- /* fall through */
+ fallthrough;
case DP_LINK_BW_5_4:
rate_valid[7] = 1;
- /* fall through */
+ fallthrough;
case DP_LINK_BW_2_7:
rate_valid[4] = 1;
- /* fall through */
+ fallthrough;
case DP_LINK_BW_1_62:
rate_valid[1] = 1;
break;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f68c69a45752..9e1ad493e689 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -34,6 +34,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
@@ -3106,7 +3107,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
@@ -3246,7 +3247,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
}
unlock:
- DRM_MODESET_LOCK_ALL_END(ctx, err);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
if (err)
return ERR_PTR(err);
@@ -3327,7 +3328,7 @@ int drm_atomic_helper_resume(struct drm_device *dev,
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
- DRM_MODESET_LOCK_ALL_END(ctx, err);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
drm_atomic_state_put(state);
return err;
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index a0735fbc144b..7a01d0918861 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -537,7 +537,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
switch (map->type) {
case _DRM_REGISTERS:
iounmap(map->handle);
- /* FALLTHROUGH */
+ fallthrough;
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
break;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index c93123ff7c21..138ff34b31db 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -294,7 +294,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
crtc->gamma_size, &ctx);
out:
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 283bcc4362ca..aecdd7ea26dc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -588,7 +588,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
return -EACCES;
- mutex_lock(&crtc->dev->mode_config.mutex);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
@@ -756,8 +755,7 @@ out:
fb = NULL;
mode = NULL;
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
- mutex_unlock(&crtc->dev->mode_config.mutex);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a3c82e726057..092c8c985911 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -492,7 +492,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
case DP_DS_16BPC:
return 16;
}
- /* fall through */
+ fallthrough;
default:
return 0;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 09b32289497e..67dd72ea200e 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4308,11 +4308,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
int ret;
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
+ if (slots < 0)
return false;
- if (slots < 0)
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
+ if (!port)
return false;
if (port->vcpi.vcpi > 0) {
@@ -4328,6 +4328,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
if (ret) {
DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+ drm_dp_mst_topology_put_port(port);
goto out;
}
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
@@ -5039,8 +5040,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
crtc = conn_state->crtc;
- if (WARN_ON(!crtc))
- return -EINVAL;
+ if (!crtc)
+ continue;
if (!drm_dp_mst_dsc_aux_for_port(pos->port))
continue;
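The drm_dp_mst_allocate_vcpi hunk does two related things: it rejects a negative slots argument before taking the validated-port reference, and it drops that reference on the newly handled init failure. A self-contained sketch of that shape, with hypothetical port helpers standing in for the real topology-manager API:

#include <stdbool.h>

struct port { int refs; int valid; };

/* Hypothetical helpers standing in for the topology-manager calls. */
static struct port *port_get_validated(struct port *p)
{
	if (!p || !p->valid)
		return NULL;
	p->refs++;
	return p;
}

static void port_put(struct port *p)
{
	p->refs--;
}

static int init_slots(struct port *p, int slots)
{
	(void)p;
	return slots > 63 ? -1 : 0;     /* pretend oversized requests fail */
}

static bool allocate_slots(struct port *p, int slots)
{
	if (slots < 0)                  /* nothing acquired yet, nothing to undo */
		return false;

	p = port_get_validated(p);
	if (!p)
		return false;

	if (init_slots(p, slots)) {
		port_put(p);            /* the error path must drop the reference */
		return false;
	}

	port_put(p);
	return true;
}

Doing the cheap argument check first means every later exit path has exactly one acquired resource to release.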
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bc38322f306e..13068fdf4331 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -815,8 +815,7 @@ static void drm_dev_release(struct kref *ref)
drm_managed_release(dev);
- if (dev->managed.final_kfree)
- kfree(dev->managed.final_kfree);
+ kfree(dev->managed.final_kfree);
}
/**
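The drm_dev_release change leans on kfree() being a no-op for a NULL pointer, which makes the surrounding if redundant. free() in standard C gives the same guarantee, as in this tiny sketch:

#include <stdlib.h>

struct managed { void *final_block; };

static void release(struct managed *m)
{
	free(m->final_block);           /* no "if (m->final_block)" needed */
	m->final_block = NULL;
}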
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d4e7c8370565..19d73868490e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -879,6 +879,9 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 901b078abf40..db05f386a709 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -428,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
out_unref:
drm_mode_object_put(obj);
out:
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
@@ -470,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
break;
}
drm_property_change_valid_put(prop, ref);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 14b6f7638728..501b4fe55a3d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1930,7 +1930,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
default:
WARN(1, "Invalid aspect ratio (0%x) on mode\n",
in->picture_aspect_ratio);
- /* fall through */
+ fallthrough;
case HDMI_PICTURE_ASPECT_NONE:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index d00ea384dcbf..58f5dc2f6dd5 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Asus T103HAF */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index b7b90b3a2e38..affe1cfed009 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -792,7 +792,7 @@ static int setplane_internal(struct drm_plane *plane,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d5a4cd85a0f6..c6404b8d067f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -337,9 +337,16 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
- gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
- gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+
+ /*
+ * Reading these two registers on GC600 rev 0x19 results in an
+ * unhandled fault: external abort on non-linefetch
+ */
+ if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+ }
/*
* !!!! HACK ALERT !!!!
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 4e3e95dce6d8..cd46c882269c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
u32 dma_addr;
int change;
+ /* block scheduler */
+ drm_sched_stop(&gpu->sched, sched_job);
+
/*
* If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- return;
+ goto out_no_timeout;
/*
* If the GPU is still making forward progress on the front-end (which
@@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- return;
+ goto out_no_timeout;
}
- /* block scheduler */
- drm_sched_stop(&gpu->sched, sched_job);
-
if(sched_job)
drm_sched_increase_karma(sched_job);
@@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_resubmit_jobs(&gpu->sched);
+out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
}
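The etnaviv_sched_timedout_job rework stops the scheduler before any of the early-out checks and routes those exits through an out_no_timeout label, so the matching restart runs on every path. A minimal sketch of that single-exit shape, with stand-in stop/start/punish helpers in place of the real drm_sched_* calls:

#include <stdbool.h>

static void sched_stop(void)  { /* pause job submission */ }
static void sched_start(void) { /* resume job submission */ }
static void punish_job(void)  { /* mark the hung job, bump its karma */ }

static void timeout_handler(bool job_already_done, bool still_progressing)
{
	sched_stop();                   /* always paired with sched_start() below */

	if (job_already_done)
		goto out_no_timeout;    /* spurious timeout */

	if (still_progressing)
		goto out_no_timeout;    /* front-end moved on; give it more time */

	punish_job();

out_no_timeout:
	sched_start();
}

Keeping one restart point is what lets the two early returns become gotos without ever leaving the scheduler stopped.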
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 7a6f6df5e954..b38e9b592b8a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -987,10 +987,10 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
switch (length) {
case 3:
reg |= payload[2] << 16;
- /* Fall through */
+ fallthrough;
case 2:
reg |= payload[1] << 8;
- /* Fall through */
+ fallthrough;
case 1:
reg |= payload[0];
exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
@@ -1038,7 +1038,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
payload[1] = reg >> 16;
++xfer->rx_done;
}
- /* Fall through */
+ fallthrough;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
payload[0] = reg >> 8;
@@ -1082,10 +1082,10 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
switch (length) {
case 3:
payload[2] = (reg >> 16) & 0xff;
- /* Fall through */
+ fallthrough;
case 2:
payload[1] = (reg >> 8) & 0xff;
- /* Fall through */
+ fallthrough;
case 1:
payload[0] = reg & 0xff;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 56a2b47e1af7..5147f5929be7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -92,7 +92,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
- fbi->screen_base = exynos_gem->kvaddr + offset;
+ fbi->screen_buffer = exynos_gem->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 7445748288da..74e926abeff0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -40,7 +40,7 @@ struct exynos_drm_gem {
unsigned int flags;
unsigned long size;
void *cookie;
- void __iomem *kvaddr;
+ void *kvaddr;
dma_addr_t dma_addr;
unsigned long dma_attrs;
struct sg_table *sgt;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 86fac677fe69..3c6d9f3913d5 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -101,19 +101,19 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
break;
case DRM_FORMAT_ARGB8888:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
bpp = FSL_DCU_ARGB8888;
break;
case DRM_FORMAT_ARGB4444:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB4444:
bpp = FSL_DCU_ARGB4444;
break;
case DRM_FORMAT_ARGB1555:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB1555:
bpp = FSL_DCU_ARGB1555;
break;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8c55f5bee9ab..f4053dd6bde9 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -712,7 +712,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
switch (intel_dsi->pixel_format) {
default:
MISSING_CASE(intel_dsi->pixel_format);
- /* fallthrough */
+ fallthrough;
case MIPI_DSI_FMT_RGB565:
tmp |= PIX_FMT_RGB565;
break;
@@ -739,7 +739,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
switch (intel_dsi->video_mode_format) {
default:
MISSING_CASE(intel_dsi->video_mode_format);
- /* fallthrough */
+ fallthrough;
case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
tmp |= VIDEO_MODE_SYNC_EVENT;
break;
@@ -792,7 +792,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
switch (pipe) {
default:
MISSING_CASE(pipe);
- /* fallthrough */
+ fallthrough;
case PIPE_A:
tmp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c53c85d38fa5..a0a41ec5c341 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -905,7 +905,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
drm_dbg_kms(&dev_priv->drm,
"VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
- /* fallthrough */
+ fallthrough;
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
break;
@@ -925,7 +925,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
drm_dbg_kms(&dev_priv->drm,
"VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
- /* fallthrough */
+ fallthrough;
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
@@ -1775,7 +1775,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
switch (child->hdmi_max_data_rate) {
default:
MISSING_CASE(child->hdmi_max_data_rate);
- /* fall through */
+ fallthrough;
case HDMI_MAX_DATA_RATE_PLATFORM:
max_tmds_clock = 0;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index bb91dace304a..91a8161e7c05 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -326,7 +326,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
default:
drm_err(&dev_priv->drm,
"Unknown pnv display core clock 0x%04x\n", gcfgc);
- /* fall through */
+ fallthrough;
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
cdclk_config->cdclk = 133333;
break;
@@ -766,7 +766,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
switch (cdclk) {
default:
MISSING_CASE(cdclk);
- /* fall through */
+ fallthrough;
case 337500:
val |= LCPLL_CLK_FREQ_337_5_BDW;
break;
@@ -1042,7 +1042,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
- /* fall through */
+ fallthrough;
case 308571:
case 337500:
freq_select = CDCLK_FREQ_337_308;
@@ -1333,7 +1333,7 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv,
switch (dssm) {
default:
MISSING_CASE(dssm);
- /* fall through */
+ fallthrough;
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
cdclk_config->ref = 24000;
break;
@@ -1561,7 +1561,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
- /* fall through */
+ fallthrough;
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index eccaa79cb4a9..6968de4f3477 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -52,7 +52,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
- /* fall through */
+ fallthrough;
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
@@ -320,7 +320,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
break;
default:
MISSING_CASE(lane_count);
- /* fall-through */
+ fallthrough;
case 4:
lane_mask = PWR_UP_ALL_LANES;
break;
@@ -337,7 +337,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
break;
default:
MISSING_CASE(lane_count);
- /* fall-through */
+ fallthrough;
case 4:
lane_mask = PWR_UP_ALL_LANES;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2c484b55bcdf..a49ff3a1a63c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1888,7 +1888,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
- /* fallthrough */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
*pipe_mask = BIT(PIPE_A);
@@ -4268,7 +4268,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->hdmi_scrambling = true;
if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
pipe_config->hdmi_high_tmds_clock_ratio = true;
- /* fall through */
+ fallthrough;
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 729ec6e0d43a..68325678f5ef 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2029,12 +2029,12 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case I915_FORMAT_MOD_Y_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
@@ -2043,7 +2043,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
case 1:
@@ -2185,7 +2185,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_semiplanar_uv_plane(fb, color_plane))
return intel_tile_row_size(fb, color_plane);
- /* Fall-through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
@@ -2194,7 +2194,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 12 &&
is_semiplanar_uv_plane(fb, color_plane))
return intel_tile_row_size(fb, color_plane);
- /* Fall-through */
+ fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
@@ -6211,7 +6211,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB16161616F:
if (INTEL_GEN(dev_priv) >= 11)
break;
- /* fall through */
+ fallthrough;
default:
drm_dbg_kms(&dev_priv->drm,
"[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -10896,7 +10896,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
break;
default:
MISSING_CASE(ddi_pll_sel);
- /* fall through */
+ fallthrough;
case PORT_CLK_SEL_NONE:
return;
}
@@ -10956,10 +10956,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
drm_WARN(dev, 1,
"unknown pipe linked to transcoder %s\n",
transcoder_name(panel_transcoder));
- /* fall through */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
- /* fall through */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -13183,7 +13183,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
- /* else, fall through */
+ fallthrough;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -14930,7 +14930,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (any_ms && !check_digital_port_conflicts(state)) {
drm_dbg_kms(&dev_priv->drm,
"rejecting conflicting digital port configuration\n");
- ret = EINVAL;
+ ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 3644752cc5ec..5a5cfe25085b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -2044,9 +2044,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
- struct intel_encoder *encoder =
- intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_encoder *encoder;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!encoder)
+ return -ENODEV;
if (connector->status != connector_status_connected)
return -ENODEV;
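The i915_lpsp_capability_show fix turns the encoder lookup into its own statement and bails out with -ENODEV when nothing is attached, instead of relying on the result always being non-NULL. A small sketch of the same defensive shape with hypothetical connector/encoder types:

#include <errno.h>
#include <stddef.h>

struct encoder { int id; };
struct connector { struct encoder *enc; int connected; };

static struct encoder *attached_encoder(const struct connector *c)
{
	return c->enc;                  /* may legitimately be NULL */
}

static int show_capability(const struct connector *c)
{
	struct encoder *enc = attached_encoder(c);

	if (!enc)
		return -ENODEV;         /* nothing attached yet */
	if (!c->connected)
		return -ENODEV;

	return enc->id;                 /* safe: enc was checked above */
}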
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 0c713e83274d..e0fcb89c736b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4147,6 +4147,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
+ .name = "TC cold off",
+ .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+ .ops = &tgl_tc_cold_off_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
@@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.hsw.irq_pipe_mask = BIT(PIPE_D),
},
},
- {
- .name = "TC cold off",
- .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
- .ops = &tgl_tc_cold_off_ops,
- .id = DISP_PW_ID_NONE,
- },
};
static const struct i915_power_well_desc rkl_power_wells[] = {
@@ -5240,10 +5240,10 @@ struct buddy_page_mask {
};
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
- { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
{}
};
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index aeb6ee395cce..afa7a378b31d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -892,7 +892,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
refclk = dev_priv->dpll.ref_clks.nssc;
break;
}
- /* fall through */
+ fallthrough;
case WRPLL_REF_PCH_SSC:
/*
* We could calculate spread here, but our checking
@@ -2977,7 +2977,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
switch (dev_priv->dpll.ref_clks.nssc) {
default:
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
- /* fall-through */
+ fallthrough;
case 19200:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
@@ -2992,7 +2992,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
switch (dev_priv->dpll.ref_clks.nssc) {
default:
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
- /* fall-through */
+ fallthrough;
case 19200:
case 38400:
*pll_params = icl_tbt_pll_19_2MHz_values;
@@ -3120,7 +3120,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
switch (div1) {
default:
MISSING_CASE(div1);
- /* fall through */
+ fallthrough;
case 2:
hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index aaed9eb3b56c..4072d7062efd 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -229,7 +229,7 @@ int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
case DRM_MODE_SCALE_NONE:
WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
- /* fall through */
+ fallthrough;
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
@@ -1929,7 +1929,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return retval;
}
- level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
CRC_PMIC_PWM_PERIOD_NS);
panel->backlight.level =
intel_panel_compute_brightness(connector, level);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 2da4388e1540..5e9fb349c829 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1531,7 +1531,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
default:
drm_WARN(&dev_priv->drm, 1,
"unknown pixel multiplier specified\n");
- /* fall through */
+ fallthrough;
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -2549,19 +2549,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_LVDS0:
mask |= SDVO_OUTPUT_LVDS0;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_TMDS1:
mask |= SDVO_OUTPUT_TMDS1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_TMDS0:
mask |= SDVO_OUTPUT_TMDS0;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_RGB1:
mask |= SDVO_OUTPUT_RGB1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_RGB0:
mask |= SDVO_OUTPUT_RGB0;
break;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index d03860fef2d7..c89f5f7ccb06 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2147,7 +2147,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_RGB565:
if (INTEL_GEN(dev_priv) >= 11)
break;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_C8:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
@@ -2702,7 +2702,7 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
@@ -2733,7 +2733,7 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
@@ -2768,7 +2768,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
@@ -2801,7 +2801,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
@@ -2819,7 +2819,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XVYU2101010:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
@@ -2834,7 +2834,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
@@ -2860,7 +2860,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
@@ -2877,7 +2877,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2889,7 +2889,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P016:
if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
@@ -2910,7 +2910,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 5b5dc86a5737..8f67aef18b2d 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -159,7 +159,7 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
switch (lane_mask) {
default:
MISSING_CASE(lane_mask);
- /* fall-through */
+ fallthrough;
case 0x1:
case 0x2:
case 0x4:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b23368529a40..753f82d87a31 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -209,7 +209,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
switch (err) {
default:
WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
- /* fallthrough */
+ fallthrough;
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7050519c87a4..d15ff6748a50 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -276,7 +276,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough - to use PAGE_KERNEL anyway */
+ fallthrough; /* to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index e0f21f12d3ce..0be5e8683337 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -249,7 +249,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
- /* fall through */
+ fallthrough;
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
@@ -416,7 +416,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
case 4:
if (!IS_G4X(i915))
break;
- /* fall through */
+ fallthrough;
case 5:
g4x_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
@@ -445,7 +445,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
break;
default:
MISSING_CASE(INTEL_GEN(i915));
- /* fall-through */
+ fallthrough;
case 11:
case 12:
icl_get_stolen_reserved(i915, uncore,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index e946032b13e4..2c2bf24140c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
locked = 1;
}
ret = pin_user_pages_remote
- (work->task, mm,
+ (mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index dd1a42c4d344..26087dd79782 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -213,7 +213,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
break;
default:
MISSING_CASE(class);
- /* fall through */
+ fallthrough;
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 62979ea591f0..99e28d9021e8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1437,7 +1437,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
switch (vma->ggtt_view.type) {
default:
GEM_BUG_ON(vma->ggtt_view.type);
- /* fall through */
+ fallthrough;
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
return 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 94915f668715..898593ca4889 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -100,7 +100,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
*/
default:
GEM_BUG_ON(engine->id);
- /* fallthrough */
+ fallthrough;
case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 072725a448db..ad86c5eb5bba 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
{
u8 *cfg_base = vgpu_cfg_space(vgpu);
u8 mask, new, old;
+ pci_power_t pwr;
int i = 0;
for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
@@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
/* For other configuration space directly copy as it is. */
if (i < bytes)
memcpy(cfg_base + off + i, src + i, bytes - i);
+
+ if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
+ pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off])
+ & PCI_PM_CTRL_STATE_MASK);
+ if (pwr == PCI_D3hot)
+ vgpu->d3_entered = true;
+ gvt_dbg_core("vgpu-%d power status changed to %d\n",
+ vgpu->id, pwr);
+ }
}
/**
@@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
+ u8 next;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
@@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
+
+ /* PM Support */
+ vgpu->cfg_space.pmcsr_off = 0;
+ if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
+ next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
+ do {
+ if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
+ vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
+ break;
+ }
+ next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
+ } while (next);
+ }
}
/**
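The cfg_space.c additions walk the standard PCI capability linked list in the vGPU's virtual config space to find the Power Management capability, then cache the PMCSR offset so the config-write handler can spot a D3hot request. A standalone sketch of that walk over a raw config-space buffer; the offsets are the standard PCI ones, redefined locally only so the example builds without kernel headers:

#include <stddef.h>
#include <stdint.h>

#define PCI_STATUS           0x06
#define PCI_STATUS_CAP_LIST  0x10       /* a capability list is present */
#define PCI_CAPABILITY_LIST  0x34       /* offset of the first capability */
#define PCI_CAP_LIST_ID      0
#define PCI_CAP_LIST_NEXT    1
#define PCI_CAP_ID_PM        0x01
#define PCI_PM_CTRL          4          /* PMCSR sits 4 bytes into the capability */

static size_t find_pmcsr_offset(const uint8_t *cfg)
{
	uint16_t status = cfg[PCI_STATUS] | (cfg[PCI_STATUS + 1] << 8);
	uint8_t next;

	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;                       /* no capabilities at all */

	next = cfg[PCI_CAPABILITY_LIST];
	while (next) {
		if (cfg[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM)
			return next + PCI_PM_CTRL;
		next = cfg[next + PCI_CAP_LIST_NEXT];
	}
	return 0;                               /* PM capability not found */
}

With the offset cached, the write path only needs an equality test on the offset and a mask of the power-state bits, which is what the vgpu_pci_cfg_mem_write hunk adds.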
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 210016192ce7..a3a4305eda01 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return create_scratch_page_tree(vgpu);
}
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 320b8d6ad92f..52d0d88abd86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+
#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index a4a6db6b7f90..ff7f2515a6fe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar {
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+ u32 pmcsr_off;
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
@@ -198,6 +199,8 @@ struct intel_vgpu {
struct intel_vgpu_submission submission;
struct radix_tree_root page_track_tree;
u32 hws_pga[I915_NUM_ENGINES];
+ /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+ bool d3_entered;
struct dentry *debugfs;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 63bba7b4bb2f..05f3bc98d242 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1226,7 +1226,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
- /* fall through */
+ fallthrough;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 7d361623ff67..8fa9b31a2484 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
intel_gvt_deactivate_vgpu(vgpu);
mutex_lock(&vgpu->vgpu_lock);
+ vgpu->d3_entered = false;
intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
@@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
+ vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
@@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- intel_vgpu_invalidate_ppgtt(vgpu);
+ if (engine_mask == ALL_ENGINES)
+ intel_vgpu_invalidate_ppgtt(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr) {
- intel_vgpu_reset_gtt(vgpu);
+ if(!vgpu->d3_entered) {
+ intel_vgpu_invalidate_ppgtt(vgpu);
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+ }
+ intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_reset_resource(vgpu);
}
@@ -572,7 +579,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
- vgpu->pv_notified = false;
+ /*
+ * PCI_D0 is set before dmlr, so reset d3_entered here
+ * after it has been used.
+ */
+ if(vgpu->d3_entered)
+ vgpu->d3_entered = false;
+ else
+ vgpu->pv_notified = false;
}
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 372354d33f55..5ac4a999f05a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1204,6 +1204,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
return dst;
}
+static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
+ const u32 cmd)
+{
+ return desc->cmd.value == (cmd & desc->cmd.mask);
+}
+
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length)
@@ -1242,19 +1248,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
reg_addr);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
@@ -1478,7 +1484,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
break;
}
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
ret = check_bbstart(cmd, offset, length, batch_length,
batch_addr, shadow_addr,
jump_whitelist);
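The command-parser change stops comparing desc->cmd.value against raw opcode constants and instead masks the incoming command word with the descriptor's own mask, so commands that carry flags or length fields in their low bits still match. A small sketch with a hypothetical descriptor layout and a made-up opcode:

#include <stdbool.h>
#include <stdint.h>

struct cmd_descriptor {
	uint32_t mask;                  /* which bits identify the command */
	uint32_t value;                 /* canonical value of those bits */
};

static inline bool cmd_desc_is(const struct cmd_descriptor *desc, uint32_t cmd)
{
	return desc->value == (cmd & desc->mask);
}

/* Made-up example: opcode in bits 31:23, flags in the low bits. */
static const struct cmd_descriptor batch_start = {
	.mask  = 0xff800000u,
	.value = 0x18800000u,
};

static bool is_batch_start(uint32_t cmd)
{
	return cmd_desc_is(&batch_start, cmd);  /* true even when flag bits are set */
}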
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6a3a2ce0b394..3e6cbb0d1150 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1159,7 +1159,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
switch (engine->id) {
default:
MISSING_CASE(engine->id);
- /* fall through */
+ fallthrough;
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 28bc5f13ae52..69c0fa20eba1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
drm_WARN_ON(&i915->drm, event->parent);
-
- module_put(THIS_MODULE);
}
static int
@@ -476,7 +474,7 @@ config_status(struct drm_i915_private *i915, u64 config)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
/* Requires a mutex for sampling! */
return -ENODEV;
- /* Fall-through. */
+ fallthrough;
case I915_PMU_REQUESTED_FREQUENCY:
if (INTEL_GEN(i915) < 6)
return -ENODEV;
@@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event)
if (ret)
return ret;
- if (!event->parent) {
- __module_get(THIS_MODULE);
+ if (!event->parent)
event->destroy = i915_pmu_event_destroy;
- }
return 0;
}
@@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->base.attr_groups)
goto err_attr;
+ pmu->base.module = THIS_MODULE;
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 939a6caebb03..632b912b0bc9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -8,8 +8,6 @@
#include "../i915_selftest.h"
#include "i915_random.h"
-#define SZ_8G (1ULL << 33)
-
static void __igt_dump_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block,
bool buddy)
@@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm)
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
I915_RND_STATE(prng);
- u64 s, ms;
+ u32 s, ms;
/* Nothing fancy, just try to get an interesting bit pattern */
prandom_seed_state(&prng, i915_selftest.random_seed);
- s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
- ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
- s = max(s & -ms, ms);
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
+ s &= -ms;
- *chunk_size = ms;
- *size = s;
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
}
static int igt_buddy_alloc_smoke(void *arg)
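The igt_mm_config rewrite generates the test geometry in 4 KiB pages with 32-bit arithmetic and only converts to bytes at the end, so the chunk is always a power of two no larger than the size and the size is a whole number of chunks. A rough standalone sketch of the same recipe using the C library PRNG in place of the i915 prandom helpers (seeding and modulo bias are ignored here):

#include <stdint.h>
#include <stdlib.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static void mm_config(uint64_t *size, uint64_t *chunk_size)
{
	uint32_t pages, chunk_pages;

	/* 1 .. 2M-1 pages, i.e. just under 8 GiB worth of 4 KiB pages */
	pages = 1 + (uint32_t)(rand() % ((1u << 21) - 1));
	/* chunk: a random power of two no larger than the size */
	chunk_pages = 1u << (rand() % (ilog2_u32(pages) + 1));
	/* round the size down to a whole number of chunks */
	pages &= ~(chunk_pages - 1);

	*chunk_size = (uint64_t)chunk_pages << 12;
	*size = (uint64_t)pages << 12;
}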
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9a46be05425a..f127e633f7ca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -24,6 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/iommu.h>
#include <drm/drm_managed.h>
@@ -77,6 +78,7 @@ static void mock_device_release(struct drm_device *dev)
drm_mode_config_cleanup(&i915->drm);
out:
+ i915_params_free(&i915->params);
put_device(&i915->drm.pdev->dev);
i915->drm.pdev = NULL;
}
@@ -118,6 +120,9 @@ struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
struct pci_dev *pdev;
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ struct dev_iommu iommu;
+#endif
int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -136,8 +141,10 @@ struct drm_i915_private *mock_gem_device(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
- /* hack to disable iommu for the fake device; force identity mapping */
- pdev->dev.archdata.iommu = (void *)-1;
+ /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
+ memset(&iommu, 0, sizeof(iommu));
+ iommu.priv = (void *)-1;
+ pdev->dev.iommu = &iommu;
#endif
pci_set_drvdata(pdev, i915);
@@ -159,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void)
i915->drm.pdev = pdev;
drmm_add_final_kfree(&i915->drm, i915);
+ i915_params_copy(&i915->params, &i915_modparams);
+
intel_runtime_pm_init_early(&i915->runtime_pm);
/* Using the global GTT may ask questions about KMS users, so prepare */
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6776ebb3246d..8a4235d9d9f1 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -447,7 +447,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
ubo = drm_plane_state_to_ubo(state);
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
index f12e0271f166..ffc6b584dbf8 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.c
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -205,7 +205,7 @@ static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format)
/* YTR is forbidden for non XBGR formats */
if (modifier & AFBC_FORMAT_MOD_YTR)
return -EINVAL;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return MAFBC_FMT_RGBA8888;
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index a8bcc70644df..1ffbbecafa22 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -654,7 +654,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_addr2,
priv->viu.vd1_stride2,
priv->viu.vd1_height2);
- /* fallthrough */
+ fallthrough;
case 2:
gem = drm_fb_cma_get_gem_obj(fb, 1);
priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
@@ -666,7 +666,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_addr1,
priv->viu.vd1_stride1,
priv->viu.vd1_height1);
- /* fallthrough */
+ fallthrough;
case 1:
gem = drm_fb_cma_get_gem_obj(fb, 0);
priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 9e63a190642c..84a5d9c1f2a2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,7 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -150,7 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index b67b38c8fadf..e1c7bcd1b1eb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -133,7 +133,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
- icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+ dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
return;
}
@@ -157,11 +157,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- /*
- * Eventually we will want to scale the path vote with the frequency but
- * for now leave it at max so that the performance is nominal.
- */
- icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+ dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
}
@@ -204,6 +200,16 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
+ u32 mask, reset_val;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+ if (val <= 0x20010004) {
+ mask = 0xffffffff;
+ reset_val = 0xbabeface;
+ } else {
+ mask = 0x1ff;
+ reset_val = 0x100;
+ }
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
@@ -215,7 +221,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
- val == 0xbabeface, 100, 10000);
+ (val & mask) == reset_val, 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
@@ -602,7 +608,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
- /* Fall through */
+ fallthrough;
case GMU_IDLE_STATE_SPTP:
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
GMU_PWR_COL_HYST);
@@ -845,10 +851,24 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
if (IS_ERR_OR_NULL(gpu_opp))
return;
+ gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
a6xx_gmu_set_freq(gpu, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
+static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR_OR_NULL(gpu_opp))
+ return;
+
+ dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_put(gpu_opp);
+}
+
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -882,7 +902,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
}
/* Set the bus quota to a reasonable value for boot */
- icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+ a6xx_gmu_set_initial_bw(gpu, gmu);
/* Enable the GMU interrupt */
gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
@@ -1051,7 +1071,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
- icc_set_bw(gpu->icc_path, 0, 0);
+ dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
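The hunks above replace fixed icc_set_bw() votes with bandwidth votes derived from the device's OPP table, using the same dev_pm_opp_set_bw() call the driver now makes. A hedged sketch of the lookup/vote/release sequence, assuming the OPP entries carry bandwidth values; demo_vote_bw_for_freq() and its arguments are placeholders, not part of any kernel API:

    #include <linux/err.h>
    #include <linux/pm_opp.h>

    /* Look up the OPP for a known frequency, vote the bandwidth associated
     * with it, then drop the reference.  Passing a NULL opp to
     * dev_pm_opp_set_bw() removes the vote again.
     */
    static void demo_vote_bw_for_freq(struct device *dev, unsigned long freq)
    {
            struct dev_pm_opp *opp;

            opp = dev_pm_opp_find_freq_exact(dev, freq, true);
            if (IS_ERR(opp))
                    return;

            dev_pm_opp_set_bw(dev, opp);
            dev_pm_opp_put(opp);
    }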
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c5a3e4d4c007..3966abd523cc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -117,7 +117,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 959656ad6987..b12f5b4a1bea 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -938,7 +938,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
- a6xx_get_debugbus(gpu, a6xx_state);
+ if (snapshot_debugbus)
+ a6xx_get_debugbus(gpu, a6xx_state);
return &a6xx_state->base;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 846fd5b54c23..2fb58b7098e4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -372,7 +372,7 @@ static const struct a6xx_indexed_registers {
u32 data;
u32 count;
} a6xx_indexed_reglist[] = {
- { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 4e84f3c76f4f..9eeb46bf2a5d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -14,6 +14,10 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
+bool snapshot_debugbus = false;
+MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
+module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
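snapshot_debugbus above is a plain boolean module parameter. A self-contained sketch of the same declaration pattern; it is made static here only to keep the example standalone, whereas the driver leaves it global so other compilation units can test it:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Exposed as /sys/module/<module>/parameters/snapshot_debugbus;
     * mode 0600 makes it root-writable at runtime, default is off.
     */
    static bool snapshot_debugbus;
    module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
    MODULE_PARM_DESC(snapshot_debugbus,
                     "Include debugbus sections in GPU devcoredump (if not fused off)");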
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index e23641a5ec84..288141fe4c58 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -396,7 +396,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->next = ring->start;
/* reset completed fence seqno: */
- ring->memptrs->fence = ring->seqno;
+ ring->memptrs->fence = ring->fctx->completed_fence;
ring->memptrs->rptr = 0;
}
@@ -474,7 +474,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 99bb468f5f24..e55abae365b5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -21,6 +21,8 @@
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+extern bool snapshot_debugbus;
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f272a8d0f95b..c2729f71e2fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -827,7 +827,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
- bool request_bandwidth;
+ bool request_bandwidth = false;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a97f6d2e5a08..bd6def436c65 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -599,7 +599,10 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
- global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/* perform atomic check on the first physical encoder (master) */
@@ -625,12 +628,15 @@ static int dpu_encoder_virt_atomic_check(
/* Reserve dynamic resources now. */
if (!ret) {
/*
- * Avoid reserving resources when mode set is pending. Topology
- * info may not be available to complete reservation.
+ * Release and Allocate resources on every modeset
+ * Dont allocate when active is false.
*/
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, topology);
+ dpu_rm_release(global_state, drm_enc);
+
+ if (!crtc_state->active_changed || crtc_state->active)
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
}
}
@@ -1181,7 +1187,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- struct dpu_global_state *global_state;
int i = 0;
if (!drm_enc) {
@@ -1200,7 +1205,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc));
@@ -1230,8 +1234,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- dpu_rm_release(global_state, drm_enc);
-
mutex_unlock(&dpu_enc->enc_lock);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 33f6c56f01ed..29e373d2e7b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -866,9 +866,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_new_crtc_state(state->state,
state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+ min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
- pdpu->pipe_sblk->maxupscale << 16,
+ pdpu->pipe_sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 5ccfad794c6a..561bfa48841c 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
+/*
+ * Helpers to control vblanks while we flush.. basically just to ensure
+ * that vblank accounting is switched on, so we get valid seqn/timestamp
+ * on pageflip events (if requested)
+ */
+
+static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+ drm_crtc_vblank_get(crtc);
+ }
+}
+
+static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+ drm_crtc_vblank_put(crtc);
+ }
+}
+
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
unsigned crtc_mask = BIT(crtc_idx);
@@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
kms->funcs->enable_commit(kms);
+ vblank_get(kms, crtc_mask);
+
/*
* Flush hardware updates:
*/
@@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ vblank_put(kms, crtc_mask);
+
mutex_lock(&kms->commit_lock);
kms->funcs->complete_commit(kms, crtc_mask);
mutex_unlock(&kms->commit_lock);
@@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
*/
kms->pending_crtc_mask &= ~crtc_mask;
+ vblank_get(kms, crtc_mask);
+
/*
* Flush hardware updates:
*/
@@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ vblank_put(kms, crtc_mask);
+
mutex_lock(&kms->commit_lock);
kms->funcs->complete_commit(kms, crtc_mask);
mutex_unlock(&kms->commit_lock);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7d641c7e3514..79333842f70a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1320,6 +1320,13 @@ static int msm_pdev_remove(struct platform_device *pdev)
return 0;
}
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
@@ -1332,6 +1339,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
+ .shutdown = msm_pdev_shutdown,
.driver = {
.name = "msm",
.of_match_table = dt_match,
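The new .shutdown hook simply tears the atomic display state down at reboot or kexec so the hardware is quiesced. A minimal sketch of wiring such a hook into a platform driver; the demo_* names are placeholders and probe/remove are stubbed out:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <drm/drm_atomic_helper.h>

    static int demo_drm_probe(struct platform_device *pdev)
    {
            return 0;       /* driver setup elided */
    }

    static int demo_drm_remove(struct platform_device *pdev)
    {
            return 0;       /* driver teardown elided */
    }

    /* Disable all CRTCs so the hardware is quiesced before reboot/kexec. */
    static void demo_drm_shutdown(struct platform_device *pdev)
    {
            struct drm_device *drm = platform_get_drvdata(pdev);

            drm_atomic_helper_shutdown(drm);
    }

    static struct platform_driver demo_drm_driver = {
            .probe    = demo_drm_probe,
            .remove   = demo_drm_remove,
            .shutdown = demo_drm_shutdown,
            .driver   = {
                    .name = "demo-drm",
            },
    };
    module_platform_driver(demo_drm_driver);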
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index e397c44cc011..39ecb5a18431 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
+ &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 6639ee9b05d3..48593932bddf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -4915,6 +4915,7 @@ static int dispc_runtime_resume(struct device *dev)
static const struct dev_pm_ops dispc_pm_ops = {
.runtime_suspend = dispc_runtime_suspend,
.runtime_resume = dispc_runtime_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dispchw_driver = {
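The dev_pm_ops additions in this and the following omapdrm hunks reuse the runtime-PM callbacks for system sleep via pm_runtime_force_suspend/resume. A minimal sketch of the pattern with stubbed runtime callbacks; the demo_* names are placeholders:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int demo_runtime_suspend(struct device *dev)
    {
            return 0;       /* clock/register save elided */
    }

    static int demo_runtime_resume(struct device *dev)
    {
            return 0;       /* clock/register restore elided */
    }

    /* System sleep reuses the runtime-PM callbacks: the device is forced
     * into its runtime-suspended state late in suspend and brought back
     * early in resume.
     */
    static const struct dev_pm_ops demo_pm_ops = {
            .runtime_suspend = demo_runtime_suspend,
            .runtime_resume  = demo_runtime_resume,
            SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                         pm_runtime_force_resume)
    };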
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 79ddfbfd1b58..eeccf40bae41 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5467,6 +5467,7 @@ static int dsi_runtime_resume(struct device *dev)
static const struct dev_pm_ops dsi_pm_ops = {
.runtime_suspend = dsi_runtime_suspend,
.runtime_resume = dsi_runtime_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dsihw_driver = {
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 4d5739fa4a5d..6ccbc29c4ce4 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1614,6 +1614,7 @@ static int dss_runtime_resume(struct device *dev)
static const struct dev_pm_ops dss_pm_ops = {
.runtime_suspend = dss_runtime_suspend,
.runtime_resume = dss_runtime_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver omap_dsshw_driver = {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 4406ce2a08b4..bd12eae0cb31 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -597,7 +597,7 @@ static void venc_bridge_mode_set(struct drm_bridge *bridge,
switch (venc_mode) {
default:
WARN_ON_ONCE(1);
- /* Fall-through */
+ fallthrough;
case VENC_MODE_PAL:
venc->config = &venc_config_pal_trm;
break;
@@ -903,6 +903,7 @@ static int venc_runtime_resume(struct device *dev)
static const struct dev_pm_ops venc_pm_ops = {
.runtime_suspend = venc_runtime_suspend,
.runtime_resume = venc_runtime_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static const struct of_device_id venc_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 528764566b17..de95dc1b861c 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -89,7 +89,7 @@ static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *conn
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct drm_display_mode new_mode = { { 0 } };
+ struct drm_display_mode new_mode = {};
enum drm_mode_status status;
status = omap_connector_mode_fixup(omap_connector->output, mode,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 6d40914675da..328a4a74f534 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
if (omap_state->manually_updated)
return;
- spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_on(crtc);
+
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
+ spin_lock_irq(&crtc->dev->event_lock);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 1a49e619aacf..e8f7b11352d2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
- ops->map(ops, iova, paddr, pgsize, prot);
+ ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
iova += pgsize;
paddr += pgsize;
len -= pgsize;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ba20c6f03719..886e9959496f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4856,7 +4856,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3b7ead5be5bf..73f67bf222e1 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -820,7 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
((idx_value >> 21) & 0xF));
return -EINVAL;
}
- /* Fall through. */
+ fallthrough;
case 6:
track->cb[i].cpp = 4;
break;
@@ -971,7 +971,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
- /* Fall through. */
+ fallthrough;
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1d4c04e0a449..50b89b6d9a6c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -115,7 +115,7 @@ void r420_pipes_init(struct radeon_device *rdev)
default:
/* force to 1 pipe */
num_pipes = 1;
- /* fall through */
+ fallthrough;
case 1:
tmp = (0 << 1);
break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 49e8266461f8..390a9621604a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -487,7 +487,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return -EINVAL;
}
}
- /* fall through */
+ fallthrough;
case V_0280A0_CLEAR_ENABLE:
{
uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
@@ -1535,7 +1535,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
break;
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
is_array = true;
- /* fall through */
+ fallthrough;
case V_038000_SQ_TEX_DIM_2D_MSAA:
array_check.nsamples = 1 << llevel;
llevel = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1ad5c3b86b64..57fb3eb3a4b4 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -454,7 +454,7 @@ static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
if (p->rdev->family >= CHIP_PALM)
return 0;
- /* fall through */
+ fallthrough;
default:
DRM_ERROR("UVD codec not supported by hardware %d!\n",
stream_type);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a167e1c36d24..d1c73e9db889 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5744,7 +5744,7 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic
si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index f858d8d06347..800721153d51 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -219,7 +219,7 @@ done:
WREG32(RS_DQ_RD_RET_CONF, 0x3f);
WREG32(MC_CONFIG, 0x1f);
- /* fall through */
+ fallthrough;
case CHIP_RV670:
case CHIP_RV635:
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index a2ac25c11c90..e0d40ae67d54 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -306,7 +306,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
@@ -444,7 +444,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
@@ -566,7 +566,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
@@ -705,7 +705,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
@@ -1066,7 +1066,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
ret = -EINVAL;
goto done;
}
- /* fall through */
+ fallthrough;
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 008f07923bbc..38a558768e53 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -850,13 +850,13 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
switch (info->channels) {
case 8:
audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
- /* fall through */
+ fallthrough;
case 6:
audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
- /* fall through */
+ fallthrough;
case 4:
audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
- /* fall through */
+ fallthrough;
case 2:
audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
break;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 359b56e43b83..ced9a8287dd8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -195,7 +195,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_LVDS:
is_lvds = true;
- /* Fallthrough */
+ fallthrough;
case DRM_MODE_ENCODER_DSI:
case DRM_MODE_ENCODER_NONE:
channel = 0;
@@ -342,7 +342,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
- /* Fall through */
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index aa67cb037e9d..7f13f4d715bf 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1027,7 +1027,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 9a0b3240bc58..424ad60b4f38 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -135,7 +135,7 @@ static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
default:
WARN_ON_ONCE(1);
- /* fallthrough */
+ fallthrough;
case 4:
max = 4;
break;
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 808c8af58fd5..09485c7f0d6f 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -154,7 +154,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
break;
case DISPC_VP_DPI:
enc_type = DRM_MODE_ENCODER_DPI;
- conn_type = DRM_MODE_CONNECTOR_LVDS;
+ conn_type = DRM_MODE_CONNECTOR_DPI;
break;
default:
WARN_ON(1);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 1856962411c7..518220bd092a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -386,7 +386,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
- /* fallthrough */
+ fallthrough;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f297fd5e02d4..cc6a4e7551e3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,11 +287,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
*/
if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
- bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
-
- ret = ttm_tt_create(bo, zero);
- if (ret)
- goto out_err;
+ if (bo->ttm == NULL) {
+ bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+ ret = ttm_tt_create(bo, zero);
+ if (ret)
+ goto out_err;
+ }
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
@@ -652,8 +653,13 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
- if (!placement.num_placement && !placement.num_busy_placement)
- return ttm_bo_pipeline_gutting(bo);
+ if (!placement.num_placement && !placement.num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -1192,8 +1198,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement)
- return ttm_bo_pipeline_gutting(bo);
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
/*
* Check whether we need to move buffer.
@@ -1210,6 +1221,14 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
ttm_flag_masked(&bo->mem.placement, new_flags,
~TTM_PL_MASK_MEMTYPE);
}
+ /*
+ * We might need to add a TTM.
+ */
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ ret = ttm_tt_create(bo, true);
+ if (ret)
+ return ret;
+ }
return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7fb3e0bcbab4..e6c8bd254055 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -531,15 +531,12 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
.interruptible = false,
.no_wait_gpu = false
};
- struct ttm_tt *ttm;
+ struct ttm_tt *ttm = bo->ttm;
pgprot_t prot;
int ret;
- ret = ttm_tt_create(bo, true);
- if (ret)
- return ret;
+ BUG_ON(!ttm);
- ttm = bo->ttm;
ret = ttm_tt_populate(ttm, &ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index d7a6537dd6ee..4732dcc80e11 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -351,11 +351,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
};
- if (ttm_tt_create(bo, true)) {
- ret = VM_FAULT_OOM;
- goto out_io_unlock;
- }
-
ttm = bo->ttm;
if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
@@ -510,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
- unsigned long offset = (addr) - vma->vm_start;
struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
@@ -528,7 +525,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
if (unlikely(ret != 0))
return ret;
}
- /* fall through */
+ fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
break;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9d1c7177384c..3437711ddb43 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -50,9 +50,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
dma_resv_assert_held(bo->base.resv);
- if (bo->ttm)
- return 0;
-
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
@@ -70,6 +67,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
page_flags |= TTM_PAGE_FLAG_SG;
break;
default:
+ bo->ttm = NULL;
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 551fa31629af..5771bb53ce6a 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -179,21 +179,21 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
- /* fall through */
+ fallthrough;
case dr_via_desc_pages_alloc:
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
free_page((unsigned long)vsg->desc_pages[i]);
}
kfree(vsg->desc_pages);
- /* fall through */
+ fallthrough;
case dr_via_pages_locked:
unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
(vsg->direction == DMA_FROM_DEVICE));
- /* fall through */
+ fallthrough;
case dr_via_pages_alloc:
vfree(vsg->pages);
- /* fall through */
+ fallthrough;
default:
vsg->state = dr_via_sg_init;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 7a2430e34e00..c8da7adc6b30 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ dma_fence_put(&out_fence->f);
virtio_gpu_notify(vgdev);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 0a5c8cf409fb..4d944a0dff3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -39,8 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
u32 events_read, events_clear = 0;
/* read the config space */
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- events_read, &events_read);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
@@ -49,8 +49,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
- virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
- events_clear, &events_clear);
+ virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
+ events_clear, &events_clear);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -165,8 +165,8 @@ int virtio_gpu_init(struct drm_device *dev)
}
/* get display info */
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- num_scanouts, &num_scanouts);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
if (!vgdev->num_scanouts) {
@@ -176,8 +176,8 @@ int virtio_gpu_init(struct drm_device *dev)
}
DRM_INFO("number of scanouts: %d\n", num_scanouts);
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- num_capsets, &num_capsets);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ num_capsets, &num_capsets);
DRM_INFO("number of cap sets: %d\n", num_capsets);
virtio_gpu_modeset_init(vgdev);
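virtio_cread_le()/virtio_cwrite_le() make the little-endian layout of modern (VIRTIO 1.0+) config space explicit, which matters on big-endian hosts. A small sketch of reading one field this way; the wrapper function is illustrative and the header choice is an assumption, not taken from the driver:

    #include <linux/virtio_config.h>
    #include <uapi/linux/virtio_gpu.h>

    /* Read a little-endian config field and return it in native byte order. */
    static u32 demo_read_num_scanouts(struct virtio_device *vdev)
    {
            u32 num_scanouts = 0;

            virtio_cread_le(vdev, struct virtio_gpu_config,
                            num_scanouts, &num_scanouts);
            return num_scanouts;
    }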
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 346cef5ce251..e83651b7747d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
}
sg_free_table(shmem->pages);
+ kfree(shmem->pages);
shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base);
}
@@ -141,7 +142,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry **ents,
unsigned int *nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
int si, ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9e663a5d9952..53af60d484a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
@@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 4284c4bd444d..e67e2e8f6e6f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3037,7 +3037,7 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
cmd->body.soid);
if (IS_ERR(res)) {
- DRM_ERROR("Cound not find streamoutput to bind.\n");
+ DRM_ERROR("Could not find streamoutput to bind.\n");
return PTR_ERR(res);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbce45d142aa..312ed0881a99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -186,7 +186,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
/* TODO handle none page aligned offsets */
/* TODO handle more dst & src != 0 */
/* TODO handle more then one copy */
- DRM_ERROR("Cant snoop dma request for cursor!\n");
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
box->srcx, box->srcy, box->srcz,
box->x, box->y, box->z,
@@ -2575,7 +2575,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
++i;
}
- if (i != unit) {
+ if (&con->head == &dev_priv->dev->mode_config.connector_list) {
DRM_ERROR("Could not find initial display unit.\n");
ret = -EINVAL;
goto out_unlock;
@@ -2599,13 +2599,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
break;
}
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- *p_mode = mode;
- else {
+ if (&mode->head == &con->modes) {
WARN_ONCE(true, "Could not find initial preferred mode.\n");
*p_mode = list_first_entry(&con->modes,
struct drm_display_mode,
head);
+ } else {
+ *p_mode = mode;
}
out_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 16dafff5cab1..c4017c7a24db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
struct vmw_legacy_display_unit *entry;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
crtc = &entry->base.crtc;
w = max(w, crtc->x + crtc->mode.hdisplay);
h = max(h, crtc->y + crtc->mode.vdisplay);
- i++;
}
if (crtc == NULL)
return 0;
- fb = entry->base.crtc.primary->state->fb;
+ fb = crtc->primary->state->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->format->cpp[0] * 8,
@@ -388,8 +387,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
ldu->base.is_implicit = true;
/* Initialize primary plane */
- vmw_du_plane_reset(primary);
-
ret = drm_universal_plane_init(dev, &ldu->base.primary,
0, &vmw_ldu_plane_funcs,
vmw_primary_plane_formats,
@@ -403,8 +400,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
/* Initialize cursor plane */
- vmw_du_plane_reset(cursor);
-
ret = drm_universal_plane_init(dev, &ldu->base.cursor,
0, &vmw_ldu_cursor_funcs,
vmw_cursor_plane_formats,
@@ -418,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
- vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
@@ -446,7 +440,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
- vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
&vmw_legacy_crtc_funcs, NULL);
@@ -521,6 +514,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_legacy;
+ drm_mode_config_reset(dev);
+
DRM_INFO("Legacy Display Unit initialized\n");
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32a22e4eddb1..4bf0f5ec4fc2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -859,8 +859,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.is_implicit = false;
/* Initialize primary plane */
- vmw_du_plane_reset(primary);
-
ret = drm_universal_plane_init(dev, &sou->base.primary,
0, &vmw_sou_plane_funcs,
vmw_primary_plane_formats,
@@ -875,8 +873,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
- vmw_du_plane_reset(cursor);
-
ret = drm_universal_plane_init(dev, &sou->base.cursor,
0, &vmw_sou_cursor_funcs,
vmw_cursor_plane_formats,
@@ -890,7 +886,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);
- vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
@@ -918,8 +913,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
-
- vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
&sou->base.cursor,
&vmw_screen_object_crtc_funcs, NULL);
@@ -973,6 +966,8 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_object;
+ drm_mode_config_reset(dev);
+
DRM_INFO("Screen Objects Display Unit initialized\n");
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 16b385629688..cf3aafd00837 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1738,8 +1738,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.is_implicit = false;
/* Initialize primary plane */
- vmw_du_plane_reset(primary);
-
ret = drm_universal_plane_init(dev, primary,
0, &vmw_stdu_plane_funcs,
vmw_primary_plane_formats,
@@ -1754,8 +1752,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
- vmw_du_plane_reset(cursor);
-
ret = drm_universal_plane_init(dev, cursor,
0, &vmw_stdu_cursor_funcs,
vmw_cursor_plane_formats,
@@ -1769,8 +1765,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs);
- vmw_du_connector_reset(connector);
-
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
@@ -1798,7 +1792,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
- vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary,
&stdu->base.cursor,
&vmw_stdu_crtc_funcs, NULL);
@@ -1894,6 +1887,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
}
}
+ drm_mode_config_reset(dev);
+
DRM_INFO("Screen Target Display device initialized\n");
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 126f93c0b0b8..3914bfee0533 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1969,7 +1969,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_mip = 1;
num_subres = num_layers * num_mip;
- dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
+ dirty_size = struct_size(dirty, boxes, num_subres);
acc_size = ttm_round_pot(dirty_size);
ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
acc_size, &ctx);
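The open-coded size calculation above becomes struct_size(), which sizes a structure with a trailing flexible array and saturates instead of wrapping on overflow. A standalone sketch with stand-in types; the demo_* names are not from vmwgfx:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_box {
            u32 x, y, w, h;
    };

    struct demo_dirty {
            unsigned int num_boxes;
            struct demo_box boxes[];        /* flexible array member */
    };

    /* struct_size() saturates to SIZE_MAX on overflow, so a huge num_boxes
     * makes the allocation fail instead of silently under-allocating.
     */
    static struct demo_dirty *demo_dirty_alloc(unsigned int num_boxes)
    {
            struct demo_dirty *dirty;

            dirty = kzalloc(struct_size(dirty, boxes, num_boxes), GFP_KERNEL);
            if (dirty)
                    dirty->num_boxes = num_boxes;

            return dirty;
    }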
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3e660fb111b3..cc93a8c9547b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -157,7 +157,8 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
- u32 bpp, u64 size, struct page **pages)
+ u32 bpp, u64 size, u32 offset,
+ struct page **pages)
{
struct xen_drm_front_evtchnl *evtchnl;
struct xen_drm_front_dbuf *dbuf;
@@ -194,6 +195,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
req->op.dbuf_create.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
req->op.dbuf_create.buffer_sz = size;
+ req->op.dbuf_create.data_ofs = offset;
req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req->op.dbuf_create.width = width;
req->op.dbuf_create.height = height;
@@ -400,15 +402,15 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
args->size = args->pitch * args->height;
obj = xen_drm_front_gem_create(dev, args->size);
- if (IS_ERR_OR_NULL(obj)) {
- ret = PTR_ERR_OR_ZERO(obj);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto fail;
}
ret = xen_drm_front_dbuf_create(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(obj),
args->width, args->height, args->bpp,
- args->size,
+ args->size, 0,
xen_drm_front_gem_get_pages(obj));
if (ret)
goto fail_backend;
@@ -647,9 +649,7 @@ static void displback_changed(struct xenbus_device *xb_dev,
switch (backend_state) {
case XenbusStateReconfiguring:
- /* fall through */
case XenbusStateReconfigured:
- /* fall through */
case XenbusStateInitialised:
break;
@@ -699,7 +699,6 @@ static void displback_changed(struct xenbus_device *xb_dev,
break;
case XenbusStateUnknown:
- /* fall through */
case XenbusStateClosed:
if (xb_dev->state == XenbusStateClosed)
break;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index f92c258350ca..54486d89650e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -145,7 +145,7 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
- u32 bpp, u64 size, struct page **pages);
+ u32 bpp, u64 size, u32 offset, struct page **pages);
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u64 fb_cookie, u32 width,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
index 459702fa990e..44f1f70c0aed 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -33,6 +33,7 @@ static const u32 plane_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_YUYV,
};
const u32 *xen_drm_front_conn_get_formats(int *format_count)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index f0b85e094111..39ff95b75357 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
size = round_up(size, PAGE_SIZE);
xen_obj = gem_create_obj(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return xen_obj;
if (drm_info->front_info->cfg.be_alloc) {
@@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
*/
xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
- if (IS_ERR_OR_NULL(xen_obj->pages)) {
+ if (IS_ERR(xen_obj->pages)) {
ret = PTR_ERR(xen_obj->pages);
xen_obj->pages = NULL;
goto fail;
@@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
struct xen_gem_object *xen_obj;
xen_obj = gem_create(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return ERR_CAST(xen_obj);
return &xen_obj->base;
@@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
size = attach->dmabuf->size;
xen_obj = gem_create_obj(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return ERR_CAST(xen_obj);
ret = gem_alloc_pages_array(xen_obj, size);
@@ -210,7 +210,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
ret = xen_drm_front_dbuf_create(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(&xen_obj->base),
- 0, 0, 0, size, xen_obj->pages);
+ 0, 0, 0, size, sgt->sgl->offset,
+ xen_obj->pages);
if (ret < 0)
return ERR_PTR(ret);
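The IS_ERR_OR_NULL() checks in the xen-front hunks become plain IS_ERR() because these helpers return either a valid pointer or an ERR_PTR() value, never NULL. A small sketch of that contract with placeholder demo_* types:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_object {
            size_t size;
    };

    /* Returns a valid object or an ERR_PTR() code -- never NULL. */
    static struct demo_object *demo_create(size_t size)
    {
            struct demo_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return ERR_PTR(-ENOMEM);

            obj->size = size;
            return obj;
    }

    static int demo_use(size_t size)
    {
            struct demo_object *obj = demo_create(size);

            /* IS_ERR(), not IS_ERR_OR_NULL(): NULL is not a possible return */
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            kfree(obj);
            return 0;
    }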
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 78096bbcd226..ef11b1e4de39 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
int ret;
fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
- if (IS_ERR_OR_NULL(fb))
+ if (IS_ERR(fb))
return fb;
gem_obj = fb->obj[0];
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 821f7a71e182..99158ee67d02 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -44,7 +44,7 @@ MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
*/
static uint zynqmp_dp_power_on_delay_ms = 4;
module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
-MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)");
+MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
/* Link configuration registers */
#define ZYNQMP_DP_LINK_BW_SET 0x0
@@ -567,34 +567,37 @@ static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
u8 current_bw)
{
int max_rate = dp->link_config.max_rate;
- u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 bw_code;
u8 max_lanes = dp->link_config.max_lanes;
u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
u8 bpp = dp->config.bpp;
u8 lane_cnt;
- s8 i;
- if (current_bw == DP_LINK_BW_1_62) {
+ /* Downshift from current bandwidth */
+ switch (current_bw) {
+ case DP_LINK_BW_5_4:
+ bw_code = DP_LINK_BW_2_7;
+ break;
+ case DP_LINK_BW_2_7:
+ bw_code = DP_LINK_BW_1_62;
+ break;
+ case DP_LINK_BW_1_62:
dev_err(dp->dev, "can't downshift. already lowest link rate\n");
return -EINVAL;
- }
-
- for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
- if (current_bw && bws[i] >= current_bw)
- continue;
-
- if (bws[i] <= max_link_rate_code)
- break;
+ default:
+ /* If not given, start with max supported */
+ bw_code = max_link_rate_code;
+ break;
}
for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
int bw;
u32 rate;
- bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ bw = drm_dp_bw_code_to_link_rate(bw_code);
rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
if (pclock <= rate) {
- dp->mode.bw_code = bws[i];
+ dp->mode.bw_code = bw_code;
dp->mode.lane_cnt = lane_cnt;
dp->mode.pclock = pclock;
return dp->mode.bw_code;
@@ -1308,7 +1311,7 @@ zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
sizeof(dp->dpcd));
if (ret < 0) {
- dev_dbg(dp->dev, "DPCD read failes");
+ dev_dbg(dp->dev, "DPCD read failed");
goto disconnected;
}
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index dbcc16721931..34b4075a6a8e 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -141,7 +141,7 @@ static int ipu_bus_format_to_map(u32 fmt)
switch (fmt) {
default:
WARN_ON(1);
- /* fall-through */
+ fallthrough;
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index f2f3ef8af271..5180c5687ee5 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(vga_get);
*
* 0 on success, negative error code on failure.
*/
-int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
+static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
unsigned long flags;
@@ -554,7 +554,6 @@ bail:
spin_unlock_irqrestore(&vga_lock, flags);
return rc;
}
-EXPORT_SYMBOL(vga_tryget);
/**
* vga_put - release lock on legacy VGA resources
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index ef0cbcd7540d..fcaf8466e627 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -680,7 +680,7 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i)
* but it does have a separate power-on (reset) value.
*/
g15->leds[i].cdev.name = "g15::power_on_backlight_val";
- /* fall through */
+ fallthrough;
case LG_G15_KBD_BRIGHTNESS:
g15->leds[i].cdev.brightness_set_blocking =
lg_g510_kbd_led_set;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a78c13cc9f47..38ee25a813b9 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -844,7 +844,7 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
workitem.type = WORKITEM_TYPE_EMPTY;
break;
}
- /* fall-through */
+ fallthrough;
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
workitem.quad_id_msb =
dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB];
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 8cb1ca1936e4..071fd093a5f4 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -163,16 +163,13 @@ static int ms_surface_dial_quirk(struct hid_input *hi, struct hid_field *field,
{
switch (usage->hid & HID_USAGE_PAGE) {
case 0xff070000:
- /* fall-through */
case HID_UP_DIGITIZER:
/* ignore those axis */
return -1;
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- /* fall-through */
case HID_GD_Y:
- /* fall-through */
case HID_GD_RFKILL_BTN:
/* ignore those axis */
return -1;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 8cffa84c9650..7f41213d5ae3 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -428,7 +428,6 @@ static void rmi_report(struct hid_device *hid, struct hid_report *report)
switch (report->id) {
case RMI_READ_DATA_REPORT_ID:
- /* fall-through */
case RMI_ATTN_REPORT_ID:
return;
}
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 1a6e600197d0..2ff4c8e366ff 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -780,7 +780,7 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
case kone_mouse_event_switch_profile:
kone->actual_dpi = kone->profiles[event->value - 1].
startup_dpi;
- /* fall through */
+ fallthrough;
case kone_mouse_event_osd_profile:
kone->actual_profile = event->value;
break;
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 78a364ae2f68..7d20d1fcf8d2 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -974,7 +974,7 @@ int uclogic_params_init(struct uclogic_params *params,
}
break;
}
- /* FALL THROUGH */
+ fallthrough;
case VID_PID(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET):
case VID_PID(USB_VENDOR_ID_HUION,
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 679e142fc850..e484c3618dec 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1672,7 +1672,6 @@ static ssize_t wiimote_ext_show(struct device *dev,
case WIIMOTE_EXT_GUITAR:
return sprintf(buf, "guitar\n");
case WIIMOTE_EXT_UNKNOWN:
- /* fallthrough */
default:
return sprintf(buf, "unknown\n");
}
@@ -1722,7 +1721,6 @@ static ssize_t wiimote_dev_show(struct device *dev,
case WIIMOTE_DEV_PENDING:
return sprintf(buf, "pending\n");
case WIIMOTE_DEV_UNKNOWN:
- /* fallthrough */
default:
return sprintf(buf, "unknown\n");
}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4f97e6c12059..45e0b1c75cb1 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -785,7 +785,6 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case HIDIOCGUCODE:
- /* fall through */
case HIDIOCGUSAGE:
case HIDIOCSUSAGE:
case HIDIOCGUSAGES:
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1c96809b51c9..83dfec327c42 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -341,7 +341,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
case 2: /* Mouse with wheel */
input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
- /* fall through */
+ fallthrough;
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
@@ -1201,7 +1201,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
case 0x04:
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
- /* fall through */
+ fallthrough;
case 0x03:
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
@@ -2148,7 +2148,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, features->numbered_buttons,
value, i);
- /* fall through*/
+ fallthrough;
default:
do_report = true;
break;
@@ -3602,14 +3602,14 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
switch (features->type) {
case GRAPHIRE_BT:
__clear_bit(ABS_MISC, input_dev->absbit);
- /* fall through */
+ fallthrough;
case WACOM_MO:
case WACOM_G4:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
features->distance_fuzz, 0);
- /* fall through */
+ fallthrough;
case GRAPHIRE:
input_set_capability(input_dev, EV_REL, REL_WHEEL);
@@ -3649,7 +3649,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case INTUOS4S:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
- /* fall through */
+ fallthrough;
case INTUOS:
wacom_setup_intuos(wacom_wac);
@@ -3682,7 +3682,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case TABLETPC:
case TABLETPCE:
__clear_bit(ABS_MISC, input_dev->absbit);
- /* fall through */
+ fallthrough;
case DTUS:
case DTUSX:
@@ -3696,7 +3696,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case PTU:
__set_bit(BTN_STYLUS2, input_dev->keybit);
- /* fall through */
+ fallthrough;
case PENPARTNER:
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -3799,7 +3799,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
- /* fall through */
+ fallthrough;
case INTUOS5:
case INTUOS5L:
@@ -3817,7 +3817,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- /* fall through */
+ fallthrough;
case WACOM_27QHDT:
if (wacom_wac->shared->touch->product == 0x32C ||
@@ -3826,14 +3826,14 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
wacom_wac->shared->has_mute_touch_switch = true;
}
- /* fall through */
+ fallthrough;
case MTSCREEN:
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
- /*fall through */
+ fallthrough;
case TABLETPC:
case TABLETPCE:
@@ -3843,7 +3843,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
case INTUOSHT2:
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
- /* fall through */
+ fallthrough;
case BAMBOO_PT:
case BAMBOO_TOUCH:
@@ -4099,7 +4099,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(KEY_BUTTONCONFIG, input_dev->keybit);
__set_bit(KEY_INFO, input_dev->keybit);
- /* fall through */
+ fallthrough;
case WACOM_21UX2:
case WACOM_BEE:
@@ -4115,7 +4115,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOS3:
case INTUOS3L:
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- /* fall through */
+ fallthrough;
case INTUOS3S:
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
@@ -4139,7 +4139,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
* ID_INPUT_TABLET to be set.
*/
__set_bit(BTN_STYLUS, input_dev->keybit);
- /* fall through */
+ fallthrough;
case INTUOS4:
case INTUOS4L:
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 365b5d5967ac..96d0eccca3aa 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -291,7 +291,7 @@ static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
/* CMT speech workaround */
if (atomic_read(&ssi->tx_usecnt))
break;
- /* Else, fall through */
+ fallthrough;
case RECEIVING:
mod_timer(&ssi->keep_alive, jiffies +
msecs_to_jiffies(SSIP_KATOUT));
@@ -466,7 +466,7 @@ static void ssip_keep_alive(struct timer_list *t)
case SEND_READY:
if (atomic_read(&ssi->tx_usecnt) == 0)
break;
- /* Fall through */
+ fallthrough;
/*
* Workaround for cmt-speech in that case
* we relay on audio timers.
@@ -668,7 +668,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
case ACTIVE:
dev_err(&cl->device, "Boot info req on active state\n");
ssip_error(cl);
- /* Fall through */
+ fallthrough;
case INIT:
case HANDSHAKE:
spin_lock_bh(&ssi->lock);
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 4bc4a201f0f6..fa69b94debd9 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -296,7 +296,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
break;
case ABORT_RATE_CHANGE:
dev_dbg(&ssi->device, "abort rate change\n");
- /* Fall through */
+ fallthrough;
case POST_RATE_CHANGE:
dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n",
clk_data->old_rate, clk_data->new_rate);
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index e74b144b8f3d..754d35a25a1c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -354,7 +354,7 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
- /* fallthrough */
+ fallthrough;
case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 92ee0fe4c919..a4e8d96513c2 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -282,26 +282,52 @@ static struct {
spinlock_t lock;
} host_ts;
-static struct timespec64 hv_get_adj_host_time(void)
+static inline u64 reftime_to_ns(u64 reftime)
{
- struct timespec64 ts;
- u64 newtime, reftime;
+ return (reftime - WLTIMEDELTA) * 100;
+}
+
+/*
+ * Hard coded threshold for host timesync delay: 600 seconds
+ */
+static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
+
+static int hv_get_adj_host_time(struct timespec64 *ts)
+{
+ u64 newtime, reftime, timediff_adj;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&host_ts.lock, flags);
reftime = hv_read_reference_counter();
- newtime = host_ts.host_time + (reftime - host_ts.ref_time);
- ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+
+ /*
+	 * We need to let the caller know that the last update from the
+	 * host is older than the max allowable threshold. clock_gettime()
+ * and PTP ioctl do not have a documented error that we could
+ * return for this specific case. Use ESTALE to report this.
+ */
+ timediff_adj = reftime - host_ts.ref_time;
+ if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
+ pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
+ (timediff_adj * 100));
+ ret = -ESTALE;
+ }
+
+ newtime = host_ts.host_time + timediff_adj;
+ *ts = ns_to_timespec64(reftime_to_ns(newtime));
spin_unlock_irqrestore(&host_ts.lock, flags);
- return ts;
+ return ret;
}
static void hv_set_host_time(struct work_struct *work)
{
- struct timespec64 ts = hv_get_adj_host_time();
- do_settimeofday64(&ts);
+ struct timespec64 ts;
+
+ if (!hv_get_adj_host_time(&ts))
+ do_settimeofday64(&ts);
}
/*
@@ -361,10 +387,23 @@ static void timesync_onchannelcallback(void *context)
struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
- vmbus_recvpacket(channel, time_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ /*
+ * Drain the ring buffer and use the last packet to update
+ * host_ts
+ */
+ while (1) {
+ int ret = vmbus_recvpacket(channel, time_txf_buf,
+ HV_HYP_PAGE_SIZE, &recvlen,
+ &requestid);
+ if (ret) {
+ pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
+ break;
+ }
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
@@ -622,9 +661,7 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
- *ts = hv_get_adj_host_time();
-
- return 0;
+ return hv_get_adj_host_time(ts);
}
static struct ptp_clock_info ptp_hyperv_info = {
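
The hv_util.c hunk above makes hv_get_adj_host_time() report -ESTALE when the host's last TimeSync sample is older than the 600-second threshold. A standalone sketch of the same arithmetic, assuming the reference counter ticks in 100 ns units; the epoch constant below is the conventional 1601-to-1970 offset and is illustrative, not copied from the patch:

/* Illustrative only -- not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define EPOCH_DELTA_100NS	116444736000000000ULL	/* 1601 -> 1970, assumed */
#define TIMESYNC_THRESH_NS	(600ULL * NSEC_PER_SEC)	/* mirrors the patch */

static uint64_t reftime_to_ns(uint64_t reftime)
{
	return (reftime - EPOCH_DELTA_100NS) * 100;	/* 100 ns ticks -> ns */
}

/* host_time/host_ref/now_ref are all in 100 ns reference-counter ticks. */
static int adj_host_time_ns(uint64_t host_time, uint64_t host_ref,
			    uint64_t now_ref, uint64_t *out_ns)
{
	uint64_t diff = now_ref - host_ref;

	*out_ns = reftime_to_ns(host_time + diff);
	return (diff * 100 > TIMESYNC_THRESH_NS) ? -ESTALE : 0;
}

int main(void)
{
	uint64_t ns;
	/* pretend the last host sample is 700 seconds old */
	int ret = adj_host_time_ns(EPOCH_DELTA_100NS + 10000000ULL, 0,
				   700ULL * 10000000ULL, &ns);

	printf("ret=%d (expect -ESTALE) ns=%llu\n",
	       ret, (unsigned long long)ns);
	return 0;
}
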
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b50081cacf04..910b6e90866c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -86,6 +86,10 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
+ /* Don't notify Hyper-V if the die event is other than oops */
+ if (val != DIE_OOPS)
+ return NOTIFY_DONE;
+
/*
* Hyper-V should be notified only once about a panic. If we will be
* doing hyperv_report_panic_msg() later with kmsg data, don't do
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 319a0519ebdb..208813158bb4 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -435,7 +435,7 @@ static const char *voltage_label(struct adt7462_data *data, int which)
case 3:
return "+1.5V";
}
- /* fall through */
+ fallthrough;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return "+12V3";
@@ -493,7 +493,7 @@ static const char *voltage_label(struct adt7462_data *data, int which)
case 3:
return "+1.5";
}
- /* fall through */
+ fallthrough;
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
@@ -531,7 +531,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which)
case 3:
return 7800;
}
- /* fall through */
+ fallthrough;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 62500;
@@ -589,7 +589,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which)
case 3:
return 7800;
}
- /* fall through */
+ fallthrough;
case 11:
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 316618409315..a18887990f4a 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -753,15 +753,18 @@ static ssize_t applesmc_light_show(struct device *dev,
}
ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+ if (ret)
+ goto out;
/* newer macbooks report a single 10-bit bigendian value */
if (data_length == 10) {
left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
goto out;
}
left = buffer[2];
+
+ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
if (ret)
goto out;
- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
right = buffer[2];
out:
@@ -810,12 +813,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
- speed = ((buffer[0] << 8 | buffer[1]) >> 2);
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
+
+ speed = ((buffer[0] << 8 | buffer[1]) >> 2);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
}
static ssize_t applesmc_store_fan_speed(struct device *dev,
@@ -851,12 +853,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
u8 buffer[2];
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
+
+ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
}
static ssize_t applesmc_store_fan_manual(struct device *dev,
@@ -872,10 +873,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- val = (buffer[0] << 8 | buffer[1]);
if (ret)
goto out;
+ val = (buffer[0] << 8 | buffer[1]);
+
if (input)
val = val | (0x01 << to_index(attr));
else
@@ -951,13 +953,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
u32 count;
ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
- ((u32)buffer[2]<<8) + buffer[3];
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
+
+ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
+ ((u32)buffer[2]<<8) + buffer[3];
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
}
static ssize_t applesmc_key_at_index_read_show(struct device *dev,
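
The applesmc hunks above all apply the same reordering: check the return value of applesmc_read_key() before decoding the buffer it was supposed to fill. A minimal sketch of that ordering, with read_key() standing in for the SMC accessor:

/* Illustrative only -- not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int read_key(uint8_t *buf, unsigned int len)
{
	if (len < 2)
		return -EINVAL;
	buf[0] = 0x12;		/* pretend the SMC returned a raw value */
	buf[1] = 0x80;
	return 0;
}

static int show_fan_speed(unsigned int *speed)
{
	uint8_t buffer[2];
	int ret = read_key(buffer, sizeof(buffer));

	if (ret)		/* bail out before decoding the buffer */
		return ret;

	*speed = (buffer[0] << 8 | buffer[1]) >> 2;
	return 0;
}

int main(void)
{
	unsigned int speed;

	if (!show_fan_speed(&speed))
		printf("fan: %u rpm\n", speed);
	return 0;
}
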
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index cf0962f7a020..e9c0bbc2caa9 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -406,10 +406,10 @@ static int emc1403_probe(struct i2c_client *client,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
- /* fall through */
+ fallthrough;
case emc1403:
data->groups[1] = &emc1403_group;
- /* fall through */
+ fallthrough;
case emc1402:
data->groups[0] = &emc1402_group;
}
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index d09deb409de7..4dec793fd07d 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1285,7 +1285,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->pwm_auto_point_pwm[nr][0] =
f71882fg_read8(data,
F71882FG_REG_POINT_PWM(nr, 0));
- /* Fall through */
+ fallthrough;
case f71862fg:
data->pwm_auto_point_pwm[nr][1] =
f71882fg_read8(data,
@@ -2442,7 +2442,7 @@ static int f71882fg_probe(struct platform_device *pdev)
case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
- /* Fall through - to select correct fan/pwm reg bank! */
+ fallthrough; /* to select correct fan/pwm reg bank! */
case f71889fg:
case f71889ed:
case f71889a:
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 3dfe2ca2f8c8..c6d4567f3952 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -172,6 +172,7 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case mode_temperature:
if (tmp > 0x8000)
tmp -= 0xffff;
+ tmp *= 100; /* convert to millidegrees celsius */
break;
case mode_voltage_raw:
tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index eb72e390844e..6d1175a51832 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -96,7 +96,7 @@ int vid_from_reg(int val, u8 vrm)
val &= 0x1f;
if (val == 0x1f)
return 0;
- /* fall through */
+ fallthrough;
case 25: /* AMD NPT 0Fh */
val &= 0x3f;
return (val < 32) ? 1550 - 25 * val
@@ -122,7 +122,7 @@ int vid_from_reg(int val, u8 vrm)
case 84: /* VRM 8.4 */
val &= 0x0f;
- /* fall through */
+ fallthrough;
case 82: /* VRM 8.2 */
val &= 0x1f;
return val == 0x1f ? 0 :
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 7fc5b065ad8b..81e155692aba 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -352,7 +352,7 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
if (ret)
return ret;
- /* fall through */
+ fallthrough;
case hwmon_curr_crit:
case hwmon_curr_max:
if (!resistance_uo)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 750b08713dee..5bd15622a85f 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -2669,7 +2669,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* fall through */
+ fallthrough;
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b0425694f702..242ff8bee78d 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -231,7 +231,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
if (ret < 0)
return ret;
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
- if (cnt == 0x1fff)
+ if (cnt == 0 || cnt == 0x1fff)
rpm = 0;
else
rpm = 1350000 / cnt;
@@ -243,7 +243,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
if (ret < 0)
return ret;
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
- if (cnt == 0x1fff)
+ if (cnt == 0 || cnt == 0x1fff)
rpm = 0;
else
rpm = 1350000 / cnt;
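
The nct7904 hunk above guards the RPM conversion against a tachometer count of 0 in addition to the existing 0x1fff "no reading" value, so the division can never see a zero divisor. A small sketch of the guarded conversion:

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

static unsigned int count_to_rpm(unsigned int cnt)
{
	if (cnt == 0 || cnt == 0x1fff)
		return 0;		/* stalled fan or invalid reading */
	return 1350000 / cnt;
}

int main(void)
{
	printf("%u %u %u\n", count_to_rpm(0), count_to_rpm(0x1fff),
	       count_to_rpm(1350));	/* prints "0 0 1000" */
	return 0;
}
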
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 30e18eb60da7..a71777990d49 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -752,7 +752,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
switch (sensors->freq.version) {
case 2:
show_freq = occ_show_freq_2;
- /* fall through */
+ fallthrough;
case 1:
num_attrs += (sensors->freq.num_sensors * 2);
break;
@@ -763,7 +763,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
switch (sensors->power.version) {
case 2:
show_power = occ_show_power_2;
- /* fall through */
+ fallthrough;
case 1:
num_attrs += (sensors->power.num_sensors * 4);
break;
@@ -781,7 +781,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
break;
case 3:
show_caps = occ_show_caps_3;
- /* fall through */
+ fallthrough;
case 2:
num_attrs += (sensors->caps.num_sensors * 8);
break;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 0c622711ef7e..58aa95a3c010 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -67,6 +67,7 @@ enum variants {
raa_dmpvr1_2rail,
raa_dmpvr2_1rail,
raa_dmpvr2_2rail,
+ raa_dmpvr2_2rail_nontc,
raa_dmpvr2_3rail,
raa_dmpvr2_hv,
};
@@ -241,6 +242,10 @@ static int isl68137_probe(struct i2c_client *client,
info->pages = 1;
info->read_word_data = raa_dmpvr2_read_word_data;
break;
+ case raa_dmpvr2_2rail_nontc:
+ info->func[0] &= ~PMBUS_HAVE_TEMP;
+ info->func[1] &= ~PMBUS_HAVE_TEMP;
+ fallthrough;
case raa_dmpvr2_2rail:
info->pages = 2;
info->read_word_data = raa_dmpvr2_read_word_data;
@@ -304,7 +309,7 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
{"raa228000", raa_dmpvr2_hv},
{"raa228004", raa_dmpvr2_hv},
{"raa228006", raa_dmpvr2_hv},
- {"raa228228", raa_dmpvr2_2rail},
+ {"raa228228", raa_dmpvr2_2rail_nontc},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
{}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 30b7b3ea8836..17bb64299bfd 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -447,7 +447,7 @@ static int pwm_fan_resume(struct device *dev)
return 0;
pwm_get_args(ctx->pwm, &pargs);
- duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
+ duty = DIV_ROUND_UP_ULL(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
ret = pwm_config(ctx->pwm, duty, pargs.period);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index e1d10a6b7f7c..a07b97400cba 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1213,7 +1213,7 @@ temp_type_store(struct device *dev, struct device_attribute *devattr,
case W83781D_DEFAULT_BETA:
dev_warn(dev, "Sensor type %d is deprecated, please use 4 "
"instead\n", W83781D_DEFAULT_BETA);
- /* fall through */
+ fallthrough;
case 4: /* thermistor */
tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
w83627hf_write_value(data, W83781D_REG_SCFG1,
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 015f1ea31966..d833a4f16c47 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -814,7 +814,7 @@ store_sensor(struct device *dev, struct device_attribute *da,
dev_warn(dev,
"Sensor type %d is deprecated, please use 4 instead\n",
W83781D_DEFAULT_BETA);
- /* fall through */
+ fallthrough;
case 4: /* thermistor */
tmp = w83781d_read_value(data, W83781D_REG_SCFG1);
w83781d_write_value(data, W83781D_REG_SCFG1,
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 44f68b965aec..6d52b530b429 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2127,7 +2127,7 @@ static void w83795_apply_temp_config(struct w83795_data *data, u8 config,
if (temp_chan >= 4)
break;
data->temp_mode |= 1 << temp_chan;
- /* fall through */
+ fallthrough;
case 0x3: /* Thermistor */
data->has_temp |= 1 << temp_chan;
break;
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 826a1054100d..32cd26352f38 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -6,9 +6,10 @@
menuconfig HWSPINLOCK
bool "Hardware Spinlock drivers"
+if HWSPINLOCK
+
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST
help
Say y here to support the OMAP Hardware Spinlock device (firstly
@@ -18,7 +19,6 @@ config HWSPINLOCK_OMAP
config HWSPINLOCK_QCOM
tristate "Qualcomm Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
@@ -30,7 +30,6 @@ config HWSPINLOCK_QCOM
config HWSPINLOCK_SIRF
tristate "SIRF Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_SIRF || COMPILE_TEST
help
Say y here to support the SIRF Hardware Spinlock device, which
@@ -43,7 +42,6 @@ config HWSPINLOCK_SIRF
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
depends on ARCH_SPRD || COMPILE_TEST
- depends on HWSPINLOCK
help
Say y here to support the SPRD Hardware Spinlock device.
@@ -52,7 +50,6 @@ config HWSPINLOCK_SPRD
config HWSPINLOCK_STM32
tristate "STM32 Hardware Spinlock device"
depends on MACH_STM32MP157 || COMPILE_TEST
- depends on HWSPINLOCK
help
Say y here to support the STM32 Hardware Spinlock device.
@@ -60,7 +57,6 @@ config HWSPINLOCK_STM32
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
- depends on HWSPINLOCK
depends on ARCH_U8500 || COMPILE_TEST
help
Say y here to support the STE Hardware Semaphore functionality, which
@@ -68,3 +64,5 @@ config HSEM_U8500
SoC.
If unsure, say N.
+
+endif # HWSPINLOCK
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index f0da544b14d2..364710966665 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -70,41 +70,79 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
-static int qcom_hwspinlock_probe(struct platform_device *pdev)
+static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev,
+ u32 *base, u32 *stride)
{
- struct hwspinlock_device *bank;
struct device_node *syscon;
- struct reg_field field;
struct regmap *regmap;
- size_t array_size;
- u32 stride;
- u32 base;
int ret;
- int i;
syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
- if (!syscon) {
- dev_err(&pdev->dev, "no syscon property\n");
- return -ENODEV;
- }
+ if (!syscon)
+ return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ return regmap;
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, base);
if (ret < 0) {
dev_err(&pdev->dev, "no offset in syscon\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, stride);
if (ret < 0) {
dev_err(&pdev->dev, "no stride syscon\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
+ return regmap;
+}
+
+static const struct regmap_config tcsr_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40000,
+ .fast_io = true,
+};
+
+static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
+ u32 *offset, u32 *stride)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+	/* All modern platforms have offset 0 and a stride of 4k */
+ *offset = 0;
+ *stride = 0x1000;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config);
+}
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank;
+ struct reg_field field;
+ struct regmap *regmap;
+ size_t array_size;
+ u32 stride;
+ u32 base;
+ int i;
+
+ regmap = qcom_hwspinlock_probe_syscon(pdev, &base, &stride);
+ if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV)
+ regmap = qcom_hwspinlock_probe_mmio(pdev, &base, &stride);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
if (!bank)
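
The qcom_hwspinlock probe above now tries the legacy syscon lookup first and falls back to a plain MMIO regmap only when that path returns -ENODEV, distinguishing "not present" from real failures via error pointers. A standalone sketch of that fallback pattern (ERR_PTR/IS_ERR/PTR_ERR re-created locally; in the kernel they come from <linux/err.h>):

/* Illustrative only -- not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *probe_syscon(int have_syscon)
{
	/* a real regmap pointer, or -ENODEV when the DT has no "syscon" */
	return have_syscon ? (void *)0x1000 : ERR_PTR(-ENODEV);
}

static void *probe_mmio(void)
{
	return (void *)0x2000;	/* pretend the MMIO regmap init succeeded */
}

static int probe(int have_syscon)
{
	void *regmap = probe_syscon(have_syscon);

	/* fall back to plain MMIO only when the syscon path is absent */
	if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV)
		regmap = probe_mmio();
	if (IS_ERR(regmap))
		return (int)PTR_ERR(regmap);

	printf("regmap at %p\n", regmap);
	return 0;
}

int main(void)
{
	return probe(0) || probe(1);
}
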
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 96544b348c27..7e642fb3ed15 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -346,10 +346,10 @@ static void debug_init_arch_data(void *info)
switch (mode) {
case EDDEVID_IMPL_FULL:
drvdata->edvidsr_present = true;
- /* Fall through */
+ fallthrough;
case EDDEVID_IMPL_EDPCSR_EDCIDSR:
drvdata->edcidsr_present = true;
- /* Fall through */
+ fallthrough;
case EDDEVID_IMPL_EDPCSR:
/*
* In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 6d7d2169bfb2..96425e818fc2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1382,7 +1382,6 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
return NOTIFY_BAD;
break;
case CPU_PM_EXIT:
- /* fallthrough */
case CPU_PM_ENTER_FAILED:
if (drvdata->state_needs_restore)
etm4_cpu_restore(drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 7040d583bed9..9ca3aaafcfbc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -84,9 +84,7 @@ u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
*/
switch (drvdata->memwidth) {
case TMC_MEM_INTF_WIDTH_32BITS:
- /* fallthrough */
case TMC_MEM_INTF_WIDTH_64BITS:
- /* fallthrough */
case TMC_MEM_INTF_WIDTH_128BITS:
mask = GENMASK(31, 4);
break;
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index a1529f571491..9ca8c4e045f8 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -84,11 +84,11 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
/* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
case STP_PACKET_GERR:
reg += 4;
- /* fall through */
+ fallthrough;
case STP_PACKET_XSYNC:
reg += 8;
- /* fall through */
+ fallthrough;
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 388978775be0..710fbef9a9c2 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -542,8 +542,8 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
}
EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
-MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
- "Wolfram Sang <kernel@pengutronix.de>");
+MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 88639e52c73a..293e7a0760e7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -146,6 +146,7 @@ config I2C_I801
Elkhart Lake (PCH)
Tiger Lake (PCH)
Jasper Lake (SOC)
+ Emmitsburg (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index a43deea390f5..fb93152845f4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -519,9 +519,9 @@ static struct pci_driver ali1535_driver = {
module_pci_driver(ali1535_driver);
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
- "Philip Edelbrock <phil@netroedge.com>, "
- "Mark D. Studebaker <mdsxyz123@yahoo.com> "
- "and Dan Eaton <dan.eaton@rocketlogix.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Dan Eaton <dan.eaton@rocketlogix.com>");
MODULE_DESCRIPTION("ALI1535 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 02185a1cfa77..cc58feacd082 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -502,8 +502,8 @@ static struct pci_driver ali15x3_driver = {
module_pci_driver(ali15x3_driver);
-MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, "
- "Philip Edelbrock <phil@netroedge.com>, "
- "and Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
MODULE_DESCRIPTION("ALI15X3 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2b14fef5bf26..34862ad3423e 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -381,7 +381,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
if (status)
return status;
len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
- /* fall through */
+ fallthrough;
case I2C_SMBUS_I2C_BLOCK_DATA:
for (i = 0; i < len; i++) {
status = amd_ec_read(smbus, AMD_SMB_DATA + i,
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index f51702d86a90..31268074c422 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -504,7 +504,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
goto error_and_stop;
}
irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
- /* fall through */
+ fallthrough;
case ASPEED_I2C_MASTER_TX_FIRST:
if (bus->buf_index < msg->len) {
bus->master_state = ASPEED_I2C_MASTER_TX;
@@ -520,7 +520,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
/* RX may not have completed yet (only address cycle) */
if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
goto out_no_complete;
- /* fall through */
+ fallthrough;
case ASPEED_I2C_MASTER_RX:
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
dev_err(bus->dev, "master failed to RX\n");
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 363d540a8345..66864f9cf7ac 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -816,79 +816,16 @@ error:
return ret;
}
-static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
-{
- struct at91_twi_dev *dev = i2c_get_adapdata(adap);
-
- pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
-}
-
-static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
-{
- struct at91_twi_dev *dev = i2c_get_adapdata(adap);
-
- pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
-}
-
static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
struct at91_twi_dev *dev)
{
struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
- dev->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
+ rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
- return PTR_ERR(dev->pinctrl);
+ return PTR_ERR(rinfo->pinctrl);
}
-
- dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
- PINCTRL_STATE_DEFAULT);
- dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
- "gpio");
- if (IS_ERR(dev->pinctrl_pins_default) ||
- IS_ERR(dev->pinctrl_pins_gpio)) {
- dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
- return -EINVAL;
- }
-
- /*
- * pins will be taken as GPIO, so we might as well inform pinctrl about
- * this and move the state to GPIO
- */
- pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
-
- rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
- if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (IS_ERR(rinfo->sda_gpiod) ||
- IS_ERR(rinfo->scl_gpiod)) {
- dev_info(&pdev->dev, "recovery information incomplete\n");
- if (!IS_ERR(rinfo->sda_gpiod)) {
- gpiod_put(rinfo->sda_gpiod);
- rinfo->sda_gpiod = NULL;
- }
- if (!IS_ERR(rinfo->scl_gpiod)) {
- gpiod_put(rinfo->scl_gpiod);
- rinfo->scl_gpiod = NULL;
- }
- pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
- return -EINVAL;
- }
-
- /* change the state of the pins back to their default state */
- pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
-
- dev_info(&pdev->dev, "using scl, sda for recovery\n");
-
- rinfo->prepare_recovery = at91_prepare_twi_recovery;
- rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
- rinfo->recover_bus = i2c_generic_scl_recovery;
dev->adapter.bus_recovery_info = rinfo;
return 0;
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
index 7e7b4955ca7f..eae673ae786c 100644
--- a/drivers/i2c/busses/i2c-at91.h
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -157,9 +157,6 @@ struct at91_twi_dev {
struct at91_twi_dma dma;
bool slave_detected;
struct i2c_bus_recovery_info rinfo;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pinctrl_pins_default;
- struct pinctrl_state *pinctrl_pins_gpio;
#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
unsigned smr;
struct i2c_client *slave;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 8a3c98866fb7..d8295b1c379d 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -720,7 +720,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
/* mark the last byte */
if (!process_call && (i == msg->len - 1))
- val |= 1 << M_TX_WR_STATUS_SHIFT;
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
@@ -738,7 +738,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
addr = i2c_8bit_addr_from_msg(msg);
/* mark it the last byte out */
- val = addr | (1 << M_TX_WR_STATUS_SHIFT);
+ val = addr | BIT(M_TX_WR_STATUS_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
@@ -1078,7 +1078,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
if (!iproc_i2c->slave)
return -EINVAL;
- iproc_i2c->slave = NULL;
+ disable_irq(iproc_i2c->irq);
/* disable all slave interrupts */
tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
@@ -1091,6 +1091,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+ /* flush TX/RX FIFOs */
+ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ iproc_i2c->slave = NULL;
+
+ enable_irq(iproc_i2c->irq);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d9b86fcc3825..5dc519516292 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -392,7 +392,7 @@ static const struct i2c_algorithm bcm2835_i2c_algo = {
/*
* The BCM2835 was reported to have problems with clock stretching:
- * http://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html
+ * https://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html
* https://www.raspberrypi.org/forums/viewtopic.php?p=146272
*/
static const struct i2c_adapter_quirks bcm2835_i2c_quirks = {
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 8522134f9ea9..55c83a7a24f3 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -90,7 +90,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
switch (pdev->device) {
case 0x0817:
dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
- /* fall through */
+ fallthrough;
case 0x0818:
case 0x0819:
c->bus_num = pdev->device - 0x817 + 3;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a71bc58fc03c..0dfeb2d11603 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -55,6 +55,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "HISI02A1", 0 },
{ "HISI02A2", 0 },
{ "HISI02A3", 0 },
+ { "HYGO0010", ACCESS_INTR_MASK },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 332f00437479..f67639dc74b7 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -187,7 +187,7 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
break;
}
i2c->state = STATE_WRITE;
- /* fall through */
+ fallthrough;
case STATE_WRITE:
if (i2c->msgbuf_ptr < i2c->msg->len)
dc_i2c_write_buf(i2c);
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 73f139690e4e..843b31a0f752 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -846,11 +846,10 @@ static void pch_i2c_remove(struct pci_dev *pdev)
kfree(adap_info);
}
-#ifdef CONFIG_PM
-static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_i2c_suspend(struct device *dev)
{
- int ret;
int i;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct adapter_info *adap_info = pci_get_drvdata(pdev);
void __iomem *p = adap_info->pch_data[0].pch_base_address;
@@ -872,34 +871,13 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
ioread32(p + PCH_I2CESRSTA));
- ret = pci_save_state(pdev);
-
- if (ret) {
- pch_pci_err(pdev, "pci_save_state\n");
- return ret;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int pch_i2c_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_i2c_resume(struct device *dev)
{
int i;
- struct adapter_info *adap_info = pci_get_drvdata(pdev);
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if (pci_enable_device(pdev) < 0) {
- pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n");
- return -EIO;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
+ struct adapter_info *adap_info = dev_get_drvdata(dev);
for (i = 0; i < adap_info->ch_num; i++)
pch_i2c_init(&adap_info->pch_data[i]);
@@ -908,18 +886,15 @@ static int pch_i2c_resume(struct pci_dev *pdev)
return 0;
}
-#else
-#define pch_i2c_suspend NULL
-#define pch_i2c_resume NULL
-#endif
+
+static SIMPLE_DEV_PM_OPS(pch_i2c_pm_ops, pch_i2c_suspend, pch_i2c_resume);
static struct pci_driver pch_pcidriver = {
.name = KBUILD_MODNAME,
.id_table = pch_pcidev_id,
.probe = pch_i2c_probe,
.remove = pch_i2c_remove,
- .suspend = pch_i2c_suspend,
- .resume = pch_i2c_resume
+ .driver.pm = &pch_i2c_pm_ops,
};
module_pci_driver(pch_pcidriver);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 1a319352e51b..a08554c1a570 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -442,6 +442,7 @@ static struct platform_driver em_i2c_driver = {
module_platform_driver(em_i2c_driver);
MODULE_DESCRIPTION("EMEV2 I2C bus driver");
-MODULE_AUTHOR("Ian Molton and Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_AUTHOR("Ian Molton");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, em_i2c_ids);
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index 977d6f524649..10332693edf0 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -703,7 +703,7 @@ static int fsi_i2c_probe(struct device *dev)
for (port_no = 0; port_no < ports; port_no++) {
np = fsi_i2c_find_port_of_node(dev->of_node, port_no);
- if (np && !of_device_is_available(np))
+ if (!of_device_is_available(np))
continue;
port = kzalloc(sizeof(*port), GFP_KERNEL);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index fea644921a76..e32ef3f01fe8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -54,6 +54,7 @@
* Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
* Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
* DNV (SOC) 0x19df 32 hard yes yes yes
+ * Emmitsburg (PCH) 0x1bc9 32 hard yes yes yes
* Broxton (SOC) 0x5ad4 32 hard yes yes yes
* Lewisburg (PCH) 0xa1a3 32 hard yes yes yes
* Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes
@@ -67,6 +68,7 @@
* Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes
* Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes
* Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes
+ * Tiger Lake-H (PCH) 0x43a3 32 hard yes yes yes
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
*
@@ -207,6 +209,7 @@
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
+#define PCI_DEVICE_ID_INTEL_EBG_SMBUS 0x1bc9
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -221,6 +224,7 @@
#define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4
#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
@@ -1062,6 +1066,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
@@ -1074,6 +1079,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ 0, }
};
@@ -1748,7 +1754,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
+ case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
@@ -1765,19 +1773,19 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
priv->features |= FEATURE_IDF;
- /* fall through */
+ fallthrough;
default:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
- /* fall through */
+ fallthrough;
case PCI_DEVICE_ID_INTEL_82801DB_3:
priv->features |= FEATURE_SMBUS_PEC;
priv->features |= FEATURE_BLOCK_BUFFER;
- /* fall through */
+ fallthrough;
case PCI_DEVICE_ID_INTEL_82801CA_3:
priv->features |= FEATURE_HOST_NOTIFY;
- /* fall through */
+ fallthrough;
case PCI_DEVICE_ID_INTEL_82801BA_2:
case PCI_DEVICE_ID_INTEL_82801AB_3:
case PCI_DEVICE_ID_INTEL_82801AA_3:
@@ -1986,7 +1994,8 @@ static void __exit i2c_i801_exit(void)
pci_unregister_driver(&i801_driver);
}
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I801 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index deef69e56906..efc14041d45b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -48,11 +48,13 @@
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
+#define I2C_DMA_ASYNC_MODE 0x0004
+#define I2C_DMA_SKIP_CONFIG 0x0010
+#define I2C_DMA_DIR_CHANGE 0x0200
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
#define I2C_DMA_HARD_RST 0x0002
-#define I2C_DMA_4G_MODE 0x0001
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -201,10 +203,11 @@ struct mtk_i2c_compatible {
unsigned char dcm: 1;
unsigned char auto_restart: 1;
unsigned char aux_len_reg: 1;
- unsigned char support_33bits: 1;
unsigned char timing_adjust: 1;
unsigned char dma_sync: 1;
unsigned char ltiming_adjust: 1;
+ unsigned char apdma_sync: 1;
+ unsigned char max_dma_support;
};
struct mtk_i2c_ac_timing {
@@ -250,14 +253,13 @@ struct mtk_i2c {
/**
* struct i2c_spec_values:
- * min_low_ns: min LOW period of the SCL clock
- * min_su_sta_ns: min set-up time for a repeated START condition
- * max_hd_dat_ns: max data hold time
- * min_su_dat_ns: min data set-up time
+ * @min_low_ns: min LOW period of the SCL clock
+ * @min_su_sta_ns: min set-up time for a repeated START condition
+ * @max_hd_dat_ns: max data hold time
+ * @min_su_dat_ns: min data set-up time
*/
struct i2c_spec_values {
unsigned int min_low_ns;
- unsigned int min_high_ns;
unsigned int min_su_sta_ns;
unsigned int max_hd_dat_ns;
unsigned int min_su_dat_ns;
@@ -307,10 +309,11 @@ static const struct mtk_i2c_compatible mt2712_compat = {
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
- .support_33bits = 1,
.timing_adjust = 1,
.dma_sync = 0,
.ltiming_adjust = 0,
+ .apdma_sync = 0,
+ .max_dma_support = 33,
};
static const struct mtk_i2c_compatible mt6577_compat = {
@@ -320,10 +323,11 @@ static const struct mtk_i2c_compatible mt6577_compat = {
.dcm = 1,
.auto_restart = 0,
.aux_len_reg = 0,
- .support_33bits = 0,
.timing_adjust = 0,
.dma_sync = 0,
.ltiming_adjust = 0,
+ .apdma_sync = 0,
+ .max_dma_support = 32,
};
static const struct mtk_i2c_compatible mt6589_compat = {
@@ -333,10 +337,11 @@ static const struct mtk_i2c_compatible mt6589_compat = {
.dcm = 0,
.auto_restart = 0,
.aux_len_reg = 0,
- .support_33bits = 0,
.timing_adjust = 0,
.dma_sync = 0,
.ltiming_adjust = 0,
+ .apdma_sync = 0,
+ .max_dma_support = 32,
};
static const struct mtk_i2c_compatible mt7622_compat = {
@@ -346,10 +351,11 @@ static const struct mtk_i2c_compatible mt7622_compat = {
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
- .support_33bits = 0,
.timing_adjust = 0,
.dma_sync = 0,
.ltiming_adjust = 0,
+ .apdma_sync = 0,
+ .max_dma_support = 32,
};
static const struct mtk_i2c_compatible mt8173_compat = {
@@ -358,10 +364,11 @@ static const struct mtk_i2c_compatible mt8173_compat = {
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
- .support_33bits = 1,
.timing_adjust = 0,
.dma_sync = 0,
.ltiming_adjust = 0,
+ .apdma_sync = 0,
+ .max_dma_support = 33,
};
static const struct mtk_i2c_compatible mt8183_compat = {
@@ -371,10 +378,25 @@ static const struct mtk_i2c_compatible mt8183_compat = {
.dcm = 0,
.auto_restart = 1,
.aux_len_reg = 1,
- .support_33bits = 1,
.timing_adjust = 1,
.dma_sync = 1,
.ltiming_adjust = 1,
+ .apdma_sync = 0,
+ .max_dma_support = 33,
+};
+
+static const struct mtk_i2c_compatible mt8192_compat = {
+ .quirks = &mt8183_i2c_quirks,
+ .regs = mt_i2c_regs_v2,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .timing_adjust = 1,
+ .dma_sync = 1,
+ .ltiming_adjust = 1,
+ .apdma_sync = 1,
+ .max_dma_support = 36,
};
static const struct of_device_id mtk_i2c_of_match[] = {
@@ -384,6 +406,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
+ { .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
{}
};
MODULE_DEVICE_TABLE(of, mtk_i2c_of_match);
@@ -786,11 +809,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
return 0;
}
-static inline u32 mtk_i2c_set_4g_mode(dma_addr_t addr)
-{
- return (addr & BIT_ULL(32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG;
-}
-
static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
int num, int left_num)
{
@@ -798,6 +816,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
u16 start_reg;
u16 control_reg;
u16 restart_flag = 0;
+ u16 dma_sync = 0;
u32 reg_4g_mode;
u8 *dma_rd_buf = NULL;
u8 *dma_wr_buf = NULL;
@@ -851,10 +870,16 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN);
}
+ if (i2c->dev_comp->apdma_sync) {
+ dma_sync = I2C_DMA_SKIP_CONFIG | I2C_DMA_ASYNC_MODE;
+ if (i2c->op == I2C_MASTER_WRRD)
+ dma_sync |= I2C_DMA_DIR_CHANGE;
+ }
+
/* Prepare buffer data to start transfer */
if (i2c->op == I2C_MASTER_RD) {
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
- writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
+ writel(I2C_DMA_CON_RX | dma_sync, i2c->pdmabase + OFFSET_CON);
dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_rd_buf)
@@ -868,8 +893,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
return -ENOMEM;
}
- if (i2c->dev_comp->support_33bits) {
- reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ if (i2c->dev_comp->max_dma_support > 32) {
+ reg_4g_mode = upper_32_bits(rpaddr);
writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
}
@@ -877,7 +902,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN);
} else if (i2c->op == I2C_MASTER_WR) {
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
- writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
+ writel(I2C_DMA_CON_TX | dma_sync, i2c->pdmabase + OFFSET_CON);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
@@ -891,8 +916,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
return -ENOMEM;
}
- if (i2c->dev_comp->support_33bits) {
- reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ if (i2c->dev_comp->max_dma_support > 32) {
+ reg_4g_mode = upper_32_bits(wpaddr);
writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
}
@@ -900,7 +925,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
} else {
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
+ writel(I2C_DMA_CLR_FLAG | dma_sync, i2c->pdmabase + OFFSET_CON);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
@@ -937,11 +962,11 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
return -ENOMEM;
}
- if (i2c->dev_comp->support_33bits) {
- reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ if (i2c->dev_comp->max_dma_support > 32) {
+ reg_4g_mode = upper_32_bits(wpaddr);
writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
- reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ reg_4g_mode = upper_32_bits(rpaddr);
writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
}
@@ -1215,8 +1240,9 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (i2c->dev_comp->support_33bits) {
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(33));
+ if (i2c->dev_comp->max_dma_support > 32) {
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(i2c->dev_comp->max_dma_support));
if (ret) {
dev_err(&pdev->dev, "dma_set_mask return error.\n");
return ret;
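
The i2c-mt65xx hunks above replace the 33-bit special case with a per-SoC max_dma_support field, programming the high address bits with upper_32_bits() and widening the DMA mask with DMA_BIT_MASK(). A standalone sketch of those two helpers (re-created locally for illustration; the kernel versions live in <linux/kernel.h> and <linux/dma-mapping.h>):

/* Illustrative only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	 (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

int main(void)
{
	uint64_t addr = (0x9ULL << 32) | 0x1000;	/* a >4 GiB DMA address */

	printf("mask(36)=%#llx upper_32_bits=%#x\n",
	       (unsigned long long)DMA_BIT_MASK(36), upper_32_bits(addr));
	return 0;
}
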
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 829b8c98ae51..8d9d4ffdcd24 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -251,7 +251,7 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK;
break;
}
- /* FALLTHRU */
+ fallthrough;
case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */
case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */
if ((drv_data->bytes_left == 0)
@@ -282,14 +282,14 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK;
break;
}
- /* FALLTHRU */
+ fallthrough;
case MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK: /* 0xe0 */
if (drv_data->bytes_left == 0) {
drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
drv_data->state = MV64XXX_I2C_STATE_IDLE;
break;
}
- /* FALLTHRU */
+ fallthrough;
case MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK: /* 0x50 */
if (status != MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK)
drv_data->action = MV64XXX_I2C_ACTION_CONTINUE;
@@ -417,8 +417,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
"mv64xxx_i2c_do_action: Invalid action: %d\n",
drv_data->action);
drv_data->rc = -EIO;
-
- /* FALLTHRU */
+ fallthrough;
case MV64XXX_I2C_ACTION_SEND_STOP:
drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index e1e8d4ef9aa7..d4b1b0865f67 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1122,6 +1122,7 @@ static void __exit nmk_i2c_exit(void)
subsys_initcall(nmk_i2c_init);
module_exit(nmk_i2c_exit);
-MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR");
+MODULE_AUTHOR("Sachin Verma");
+MODULE_AUTHOR("Srinidhi KASAGAR");
MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 175c590b93b7..12ac4212aded 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1425,7 +1425,6 @@ omap_i2c_probe(struct platform_device *pdev)
major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev);
break;
case OMAP_I2C_SCHEME_1:
- /* FALLTHROUGH */
default:
omap->regs = (u8 *)reg_map_ip_v2;
rev = (rev << 16) |
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 1c4c9bb06a0b..6eb0f50c5d28 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -125,7 +125,7 @@ static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_BYTE:
req.buffer_ra = cpu_to_be64(__pa(&data->byte));
req.size = cpu_to_be32(1);
- /* Fall through */
+ fallthrough;
case I2C_SMBUS_QUICK:
req.type = (read_write == I2C_SMBUS_READ) ?
OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 69740a4ff1db..8c1b31ed0c42 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -1032,7 +1032,7 @@ static struct pci_driver piix4_driver = {
module_pci_driver(piix4_driver);
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
- "Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("PIIX4 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 5d7207c10f1d..8c4ec7f13f5a 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -781,7 +781,8 @@ static void __exit i2c_adap_pnx_exit(void)
platform_driver_unregister(&i2c_pnx_driver);
}
-MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_AUTHOR("Dennis Kovalev <source@mvista.com>");
MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pnx-i2c");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2e3e1bb75013..c7c543483b08 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -583,13 +583,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
- rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
+		/* Clear SSR, too, because of old STOPs addressed to clients other than us */
+ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
}
/* master sent stop */
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
- rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
@@ -853,7 +855,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
priv->slave = slave;
rcar_i2c_write(priv, ICSAR, slave->addr);
rcar_i2c_write(priv, ICSSR, 0);
- rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSCR, SIE | SDBS);
return 0;
@@ -865,12 +867,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
WARN_ON(!priv->slave);
- /* disable irqs and ensure none is running before clearing ptr */
+ /* ensure no irq is running before clearing ptr */
+ disable_irq(priv->irq);
rcar_i2c_write(priv, ICSIER, 0);
- rcar_i2c_write(priv, ICSCR, 0);
+ rcar_i2c_write(priv, ICSSR, 0);
+ enable_irq(priv->irq);
+ rcar_i2c_write(priv, ICSCR, SDBS);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
- synchronize_irq(priv->irq);
priv->slave = NULL;
pm_runtime_put(rcar_i2c_priv_to_dev(priv));
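The rcar slave rework above relies on disable_irq() rather than a trailing synchronize_irq(): disable_irq() masks the line and waits for any running handler, so the register writes between the disable/enable pair cannot race with the ISR, whereas synchronize_irq() only waits for handlers already in flight and does not stop new ones. A minimal sketch of that quiesce pattern (the foo_* name and register are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

/* Sketch: tear down interrupt sources with the line quiesced. */
static void foo_quiesce_and_clear(int irq, void __iomem *irq_enable_reg)
{
	disable_irq(irq);		/* masks and waits for a running handler */
	writel(0, irq_enable_reg);	/* clear all enables with no ISR racing us */
	enable_irq(irq);
}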
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 15324bfbc6cb..8e3cc85d1921 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -1040,8 +1041,21 @@ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
return ret;
}
-static int rk3x_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
+static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), WAIT_TIMEOUT);
+
+ while (READ_ONCE(i2c->busy) &&
+ ktime_compare(ktime_get(), timeout) < 0) {
+ udelay(5);
+ rk3x_i2c_irq(0, i2c);
+ }
+
+ return !i2c->busy;
+}
+
+static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num, bool polling)
{
struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
unsigned long timeout, flags;
@@ -1075,8 +1089,12 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
rk3x_i2c_start(i2c);
- timeout = wait_event_timeout(i2c->wait, !i2c->busy,
- msecs_to_jiffies(WAIT_TIMEOUT));
+ if (!polling) {
+ timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+ msecs_to_jiffies(WAIT_TIMEOUT));
+ } else {
+ timeout = rk3x_i2c_wait_xfer_poll(i2c);
+ }
spin_lock_irqsave(&i2c->lock, flags);
@@ -1110,6 +1128,18 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
return ret < 0 ? ret : num;
}
+static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return rk3x_i2c_xfer_common(adap, msgs, num, false);
+}
+
+static int rk3x_i2c_xfer_polling(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return rk3x_i2c_xfer_common(adap, msgs, num, true);
+}
+
static __maybe_unused int rk3x_i2c_resume(struct device *dev)
{
struct rk3x_i2c *i2c = dev_get_drvdata(dev);
@@ -1126,6 +1156,7 @@ static u32 rk3x_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm rk3x_i2c_algorithm = {
.master_xfer = rk3x_i2c_xfer,
+ .master_xfer_atomic = rk3x_i2c_xfer_polling,
.functionality = rk3x_i2c_func,
};
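Wiring rk3x_i2c_xfer_polling() up as .master_xfer_atomic lets the I2C core use a non-sleeping transfer path when a message must go out with interrupts disabled, typically a PMIC write during shutdown or reboot; the poll loop drives the existing IRQ handler by hand instead of sleeping on the wait queue. The core-side dispatch is roughly the following (paraphrased sketch, not verbatim; i2c_in_atomic_xfer_mode() is a core-private helper):

#include <linux/i2c.h>

/* Paraphrased from __i2c_transfer(): prefer the polling callback when
 * sleeping is not an option, otherwise use the normal IRQ-driven one. */
static int example_dispatch(struct i2c_adapter *adap,
			    struct i2c_msg *msgs, int num)
{
	if (i2c_in_atomic_xfer_mode() && adap->algo->master_xfer_atomic)
		return adap->algo->master_xfer_atomic(adap, msgs, num);

	return adap->algo->master_xfer(adap, msgs, num);
}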
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 2cca1b21e26e..cab725559999 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -932,6 +932,7 @@ static void __exit sh_mobile_i2c_adap_exit(void)
module_exit(sh_mobile_i2c_adap_exit);
MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
-MODULE_AUTHOR("Magnus Damm and Wolfram Sang");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_AUTHOR("Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-sh_mobile");
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 9dcea2ba7168..8f71f01cb169 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -180,6 +180,7 @@ static void __exit i2c_sibyte_exit(void)
module_init(i2c_sibyte_init);
module_exit(i2c_sibyte_exit);
-MODULE_AUTHOR("Kip Walker (Broadcom Corp.), Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Kip Walker (Broadcom Corp.)");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index d7f72ec331e8..30db8fafe078 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -470,6 +470,6 @@ static struct platform_driver i2c_sirfsoc_driver = {
module_platform_driver(i2c_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC I2C master controller driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
- "Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
+MODULE_AUTHOR("Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index c9a3dba6a75d..31be1811d5e6 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -398,8 +398,7 @@ static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id)
if (i2c->state == STATE_READ)
goto prepare_read;
-
- /* fall through */
+ fallthrough;
case STATE_WRITE:
if (bsr & SYNQUACER_I2C_BSR_LRB) {
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 15772964a05f..00d3e4d7a01e 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -293,6 +293,8 @@ struct tegra_i2c_dev {
bool is_curr_atomic_xfer;
};
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit);
+
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
unsigned long reg)
{
@@ -419,7 +421,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
dma_addr_t dma_phys;
int err;
- if (!i2c_dev->hw->has_apb_dma)
+ if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi)
return 0;
if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
@@ -655,32 +657,47 @@ static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
if (ret)
return ret;
- if (!i2c_dev->hw->has_single_clk_source) {
- ret = clk_enable(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev,
- "Enabling fast clk failed, err %d\n", ret);
- return ret;
- }
+ ret = clk_enable(i2c_dev->fast_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev,
+ "Enabling fast clk failed, err %d\n", ret);
+ return ret;
}
- if (i2c_dev->slow_clk) {
- ret = clk_enable(i2c_dev->slow_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable slow clock: %d\n", ret);
- return ret;
- }
+ ret = clk_enable(i2c_dev->slow_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable slow clock: %d\n", ret);
+ goto disable_fast_clk;
}
ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev,
"Enabling div clk failed, err %d\n", ret);
- clk_disable(i2c_dev->fast_clk);
- return ret;
+ goto disable_slow_clk;
+ }
+
+	/*
+	 * The VI I2C device is attached to the VE power domain, which is
+	 * powered on/off across PM runtime resume/suspend. The controller
+	 * therefore loses its state and must be reset and re-initialized
+	 * after the power domain comes back on.
+	 */
+ if (i2c_dev->is_vi) {
+ ret = tegra_i2c_init(i2c_dev, true);
+ if (ret)
+ goto disable_div_clk;
}
return 0;
+
+disable_div_clk:
+ clk_disable(i2c_dev->div_clk);
+disable_slow_clk:
+ clk_disable(i2c_dev->slow_clk);
+disable_fast_clk:
+ clk_disable(i2c_dev->fast_clk);
+ return ret;
}
static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
@@ -688,12 +705,8 @@ static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
clk_disable(i2c_dev->div_clk);
-
- if (i2c_dev->slow_clk)
- clk_disable(i2c_dev->slow_clk);
-
- if (!i2c_dev->hw->has_single_clk_source)
- clk_disable(i2c_dev->fast_clk);
+ clk_disable(i2c_dev->slow_clk);
+ clk_disable(i2c_dev->fast_clk);
return pinctrl_pm_select_idle_state(i2c_dev->dev);
}
@@ -1716,20 +1729,16 @@ static int tegra_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c_dev);
- if (!i2c_dev->hw->has_single_clk_source) {
- ret = clk_prepare(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
- return ret;
- }
+ ret = clk_prepare(i2c_dev->fast_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+ return ret;
}
- if (i2c_dev->slow_clk) {
- ret = clk_prepare(i2c_dev->slow_clk);
- if (ret < 0) {
- dev_err(dev, "failed to prepare slow clock: %d\n", ret);
- goto unprepare_fast_clk;
- }
+ ret = clk_prepare(i2c_dev->slow_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare slow clock: %d\n", ret);
+ goto unprepare_fast_clk;
}
if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ &&
@@ -1750,7 +1759,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto unprepare_slow_clk;
}
- pm_runtime_irq_safe(&pdev->dev);
+	/*
+	 * VI I2C is in the VE power domain, which is not always on and is
+	 * not IRQ safe. An IRQ-safe device can't be attached to a non-IRQ-
+	 * safe domain because that would prevent powering off the PM domain.
+	 * The VI I2C device also doesn't need to be runtime IRQ safe, as it
+	 * is never used for atomic transfers.
+	 */
+ if (!i2c_dev->is_vi)
+ pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_i2c_runtime_resume(&pdev->dev);
@@ -1835,12 +1852,10 @@ unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
unprepare_slow_clk:
- if (i2c_dev->is_vi)
- clk_unprepare(i2c_dev->slow_clk);
+ clk_unprepare(i2c_dev->slow_clk);
unprepare_fast_clk:
- if (!i2c_dev->hw->has_single_clk_source)
- clk_unprepare(i2c_dev->fast_clk);
+ clk_unprepare(i2c_dev->fast_clk);
return ret;
}
@@ -1859,12 +1874,8 @@ static int tegra_i2c_remove(struct platform_device *pdev)
tegra_i2c_runtime_suspend(&pdev->dev);
clk_unprepare(i2c_dev->div_clk);
-
- if (i2c_dev->slow_clk)
- clk_unprepare(i2c_dev->slow_clk);
-
- if (!i2c_dev->hw->has_single_clk_source)
- clk_unprepare(i2c_dev->fast_clk);
+ clk_unprepare(i2c_dev->slow_clk);
+ clk_unprepare(i2c_dev->fast_clk);
tegra_i2c_release_dma(i2c_dev);
return 0;
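The Tegra clock hunks lean on the fact that the common clock framework accepts a NULL struct clk pointer: clk_prepare()/clk_enable() return 0 and clk_unprepare()/clk_disable() simply return, so the optional fast/slow clocks can be handled unconditionally and the error paths collapse into one goto chain. A minimal sketch of that unwind pattern, assuming three possibly-NULL clocks:

#include <linux/clk.h>

static int foo_enable_clocks(struct clk *fast, struct clk *slow,
			     struct clk *div)
{
	int ret;

	ret = clk_enable(fast);		/* no-op (returns 0) if fast is NULL */
	if (ret)
		return ret;

	ret = clk_enable(slow);
	if (ret)
		goto disable_fast;

	ret = clk_enable(div);
	if (ret)
		goto disable_slow;

	return 0;

disable_slow:
	clk_disable(slow);
disable_fast:
	clk_disable(fast);
	return ret;
}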
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 4abc7771af06..970ccdcbb889 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -228,7 +228,7 @@ static s32 vt596_access(struct i2c_adapter *adap, u16 addr,
goto exit_unsupported;
if (read_write == I2C_SMBUS_READ)
outb_p(data->block[0], SMBHSTDAT0);
- /* Fall through */
+ fallthrough;
case I2C_SMBUS_BLOCK_DATA:
outb_p(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
@@ -489,9 +489,9 @@ static void __exit i2c_vt596_exit(void)
}
}
-MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
- "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
- "Jean Delvare <jdelvare@suse.de>");
+MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("vt82c596 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index bd9afa383d12..7b42a18bd05c 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -151,7 +151,7 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status)
case state_repeat_start:
outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1);
- /* fallthrough */
+ fallthrough;
case state_quick:
if (iface->address_byte & 1) {
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 2ade99b105b9..e627d7b2790f 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -276,16 +276,6 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
-const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client)
-{
- if (!(client && matches))
- return NULL;
-
- return acpi_match_device(matches, &client->dev);
-}
-
static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
/*
* These Silead touchscreen controllers only work at 400KHz, for
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 26f03a14a478..5ec082e2039d 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -181,6 +182,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
+ if (bri->pinctrl)
+ pinctrl_select_state(bri->pinctrl, bri->pins_gpio);
/*
* If we can set SDA, we will always create a STOP to ensure additional
@@ -236,6 +239,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
if (bri->unprepare_recovery)
bri->unprepare_recovery(adap);
+ if (bri->pinctrl)
+ pinctrl_select_state(bri->pinctrl, bri->pins_default);
return ret;
}
@@ -251,13 +256,135 @@ int i2c_recover_bus(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_recover_bus);
-static void i2c_init_recovery(struct i2c_adapter *adap)
+static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ struct device *dev = &adap->dev;
+ struct pinctrl *p = bri->pinctrl;
+
+ /*
+ * we can't change states without pinctrl, so remove the states if
+ * populated
+ */
+ if (!p) {
+ bri->pins_default = NULL;
+ bri->pins_gpio = NULL;
+ return;
+ }
+
+ if (!bri->pins_default) {
+ bri->pins_default = pinctrl_lookup_state(p,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(bri->pins_default)) {
+ dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n");
+ bri->pins_default = NULL;
+ }
+ }
+ if (!bri->pins_gpio) {
+ bri->pins_gpio = pinctrl_lookup_state(p, "gpio");
+ if (IS_ERR(bri->pins_gpio))
+ bri->pins_gpio = pinctrl_lookup_state(p, "recovery");
+
+ if (IS_ERR(bri->pins_gpio)) {
+ dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n");
+ bri->pins_gpio = NULL;
+ }
+ }
+
+ /* for pinctrl state changes, we need all the information */
+ if (bri->pins_default && bri->pins_gpio) {
+		dev_info(dev, "using pinctrl states for GPIO recovery\n");
+ } else {
+ bri->pinctrl = NULL;
+ bri->pins_default = NULL;
+ bri->pins_gpio = NULL;
+ }
+}
+
+static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ struct device *dev = &adap->dev;
+ struct gpio_desc *gpiod;
+ int ret = 0;
+
+ /*
+ * don't touch the recovery information if the driver is not using
+ * generic SCL recovery
+ */
+ if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery)
+ return 0;
+
+ /*
+ * pins might be taken as GPIO, so we should inform pinctrl about
+ * this and move the state to GPIO
+ */
+ if (bri->pinctrl)
+ pinctrl_select_state(bri->pinctrl, bri->pins_gpio);
+
+ /*
+ * if there is incomplete or no recovery information, see if generic
+ * GPIO recovery is available
+ */
+ if (!bri->scl_gpiod) {
+ gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto cleanup_pinctrl_state;
+ }
+ if (!IS_ERR(gpiod)) {
+ bri->scl_gpiod = gpiod;
+ bri->recover_bus = i2c_generic_scl_recovery;
+ dev_info(dev, "using generic GPIOs for recovery\n");
+ }
+ }
+
+ /* SDA GPIOD line is optional, so we care about DEFER only */
+ if (!bri->sda_gpiod) {
+ /*
+ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
+ * have no effect.
+ */
+ gpiod_direction_output(bri->scl_gpiod, 0);
+ udelay(10);
+ gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN);
+
+ /* Wait a bit in case of a SDA glitch, and then release SCL. */
+ udelay(10);
+ gpiod_direction_output(bri->scl_gpiod, 1);
+
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto cleanup_pinctrl_state;
+ }
+ if (!IS_ERR(gpiod))
+ bri->sda_gpiod = gpiod;
+ }
+
+cleanup_pinctrl_state:
+ /* change the state of the pins back to their default state */
+ if (bri->pinctrl)
+ pinctrl_select_state(bri->pinctrl, bri->pins_default);
+
+ return ret;
+}
+
+static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
+{
+ i2c_gpio_init_pinctrl_recovery(adap);
+ return i2c_gpio_init_generic_recovery(adap);
+}
+
+static int i2c_init_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
char *err_str;
if (!bri)
- return;
+ return 0;
+
+ if (i2c_gpio_init_recovery(adap) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
if (!bri->recover_bus) {
err_str = "no recover_bus() found";
@@ -273,10 +400,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
if (gpiod_get_direction(bri->sda_gpiod) == 0)
bri->set_sda = set_sda_gpio_value;
}
- return;
- }
-
- if (bri->recover_bus == i2c_generic_scl_recovery) {
+ } else if (bri->recover_bus == i2c_generic_scl_recovery) {
/* Generic SCL recovery */
if (!bri->set_scl || !bri->get_scl) {
err_str = "no {get|set}_scl() found";
@@ -288,10 +412,12 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
}
}
- return;
+ return 0;
err:
dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
adap->bus_recovery_info = NULL;
+
+ return -EINVAL;
}
static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
@@ -319,11 +445,9 @@ static int i2c_device_probe(struct device *dev)
if (!client)
return 0;
- driver = to_i2c_driver(dev->driver);
-
client->irq = client->init_irq;
- if (!client->irq && !driver->disable_i2c_core_irq_mapping) {
+ if (!client->irq) {
int irq = -ENOENT;
if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
@@ -349,12 +473,14 @@ static int i2c_device_probe(struct device *dev)
client->irq = irq;
}
+ driver = to_i2c_driver(dev->driver);
+
/*
* An I2C ID table is not mandatory, if and only if, a suitable OF
* or ACPI ID table is supplied for the probing device.
*/
if (!driver->id_table &&
- !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
+ !acpi_driver_match_device(dev, dev->driver) &&
!i2c_of_match_device(dev->driver->of_match_table, client)) {
status = -ENODEV;
goto put_sync_adapter;
@@ -1227,7 +1353,7 @@ static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
return 0;
- domain = irq_domain_create_linear(adap->dev.fwnode,
+ domain = irq_domain_create_linear(adap->dev.parent->fwnode,
I2C_ADDR_7BITS_COUNT,
&i2c_host_notify_irq_ops, adap);
if (!domain)
@@ -1318,12 +1444,16 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (res)
goto out_reg;
- dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
-
pm_runtime_no_callbacks(&adap->dev);
pm_suspend_ignore_children(&adap->dev, true);
pm_runtime_enable(&adap->dev);
+ res = i2c_init_recovery(adap);
+ if (res == -EPROBE_DEFER)
+ goto out_reg;
+
+ dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+
#ifdef CONFIG_I2C_COMPAT
res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
adap->dev.parent);
@@ -1332,8 +1462,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
"Failed to create compatibility class link\n");
#endif
- i2c_init_recovery(adap);
-
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
i2c_acpi_register_devices(adap);
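With the new i2c_gpio_init_recovery() path above, an adapter driver can leave recover_bus, scl_gpiod and the pinctrl states unset and have the core look up the "scl"/"sda" GPIOs plus the "default" and "gpio"/"recovery" pinctrl states at registration time. A minimal driver-side sketch (the foo_* names and the embedded-adapter layout are assumptions, not taken from this patch):

#include <linux/i2c.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

struct foo_i2c {
	struct i2c_adapter adap;
	struct i2c_bus_recovery_info rinfo;
};

static int foo_i2c_setup_recovery(struct platform_device *pdev,
				  struct foo_i2c *i2c)
{
	/* Hand the core a pinctrl handle so it can flip the pins between
	 * the "default" and "gpio" states around a recovery attempt. */
	i2c->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(i2c->rinfo.pinctrl))
		return PTR_ERR(i2c->rinfo.pinctrl);

	/* recover_bus/scl_gpiod stay NULL: i2c_init_recovery() fills in
	 * generic GPIO recovery, or disables recovery with a message if
	 * the firmware does not describe the needed GPIOs/states. */
	i2c->adap.bus_recovery_info = &i2c->rinfo;
	return 0;
}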
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 94ff1693b391..8ce261167a2d 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -59,20 +59,11 @@ static inline int __i2c_check_suspended(struct i2c_adapter *adap)
}
#ifdef CONFIG_ACPI
-const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
int i2c_acpi_get_irq(struct i2c_client *client);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
-static inline const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client)
-{
- return NULL;
-}
static inline int i2c_acpi_get_irq(struct i2c_client *client)
{
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index da020acc9bbd..6ceb11cc4be1 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -761,8 +761,8 @@ static void __exit i2c_dev_exit(void)
unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
- "Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C /dev entries driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 593f2fd39d17..5c7ae421cacf 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -66,7 +66,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
case I2C_SLAVE_READ_PROCESSED:
/* The previous byte made it to the bus, get next one */
eeprom->buffer_idx++;
- /* fallthrough */
+ fallthrough;
case I2C_SLAVE_READ_REQUESTED:
spin_lock(&eeprom->buffer_lock);
*val = eeprom->buffer[eeprom->buffer_idx & eeprom->address_mask];
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 5c5306cd50ec..8513bd353c05 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -603,7 +603,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
- /* fall through */
+ fallthrough;
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index fd3b5da44619..50c9a41467c8 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -575,14 +575,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
if (!HPT370_ALLOW_ATA100_5 ||
check_in_drive_list(drive, bad_ata100_5))
return ATA_UDMA4;
- /* fall through */
+ fallthrough;
case HPT372 :
case HPT372A:
case HPT372N:
case HPT374 :
if (ata_id_is_sata(drive->id))
mask &= ~0x0e;
- /* fall through */
+ fallthrough;
default:
return mask;
}
@@ -602,7 +602,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
case HPT374 :
if (ata_id_is_sata(drive->id))
return 0x00;
- /* fall through */
+ fallthrough;
default:
return 0x07;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 7f17f8303988..212bb2d8bf34 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -350,7 +350,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
*/
if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break;
- /* fall-through */
+ fallthrough;
case DATA_PROTECT:
/*
* No point in retrying after an illegal request or data
@@ -750,7 +750,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
expiry = ide_cd_expiry;
- /*FALLTHRU*/
+ fallthrough;
default:
timeout = ATAPI_WAIT_PC;
break;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 1fe1f9d37a51..af7503b47dbe 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -428,7 +428,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
* (maintains previous driver behaviour)
*/
break;
- /* fall through */
+ fallthrough;
case CAPACITY_CURRENT:
/* Normal Zip/LS-120 disks */
if (memcmp(cap_desc, &floppy->cap_desc, 8))
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index e867129466b0..1ddc45a04418 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -143,7 +143,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive)
}
/* Early cdrom models used zero */
type = ide_cdrom;
- /* fall through */
+ fallthrough;
case ide_cdrom:
drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a26f85ab58a9..d016cbe68cba 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -129,7 +129,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
return pre_task_out_intr(drive, cmd);
}
handler = task_pio_intr;
- /* fall through */
+ fallthrough;
case ATA_PROT_NODATA:
if (handler == NULL)
handler = task_no_data_intr;
@@ -141,7 +141,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
hwif->expiry = dma_ops->dma_timer_expiry;
ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
dma_ops->dma_start(drive);
- /* fall through */
+ fallthrough;
default:
return ide_started;
}
@@ -579,10 +579,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
goto abort;
}
cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_OUT:
cmd.protocol = ATA_PROT_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_OUT_DMAQ:
case TASKFILE_OUT_DMA:
cmd.tf_flags |= IDE_TFLAG_WRITE;
@@ -598,10 +598,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
goto abort;
}
cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_IN:
cmd.protocol = ATA_PROT_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_IN_DMAQ:
case TASKFILE_IN_DMA:
nsect = taskin / SECTOR_SIZE;
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 024bc7ba49ee..1a700bef6c56 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -494,7 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev)
pci_read_config_byte(dev, 0x09, &reg);
if ((reg & 0x0f) != 0x00)
pci_write_config_byte(dev, 0x09, reg&0xf0);
- /* fall through */
+ fallthrough;
case ATA_16:
/* force per drive recovery and active timings
needed on ATA_33 and below chips */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8e0fb1a5bdbd..9a810e4a7946 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -90,14 +90,6 @@ static unsigned int mwait_substates __initdata;
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16)
-
-/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
bool tick;
- int cpu = smp_processor_id();
-
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
- */
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
if (!static_cpu_has(X86_FEATURE_ARAT)) {
/*
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index ba27f8673131..4e6e70250048 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1580,7 +1580,7 @@ static int mma8452_probe(struct i2c_client *client,
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
- /* fall through */
+ fallthrough;
default:
ret = -ENODEV;
goto disable_regulators;
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 7fdc5d2d1d35..1bb987a4acba 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -484,7 +484,7 @@ static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
delay_max = 10000; /* large range optimises sleepmode */
break;
}
- /* Fall through */
+ fallthrough;
default:
ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA;
break;
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 84a1733e5913..64c3cc382311 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -690,7 +690,7 @@ static void cpcap_adc_phase(struct cpcap_adc_request *req)
break;
case CPCAP_ADC_BATTI_PI17:
index = req->bank_index;
- /* fallthrough */
+ fallthrough;
default:
req->result += conv_tbl[index].cal_offset;
req->result += conv_tbl[index].align_offset;
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index 5a29e32c295f..2ea9a5c4d846 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -118,7 +118,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
case SPS30_READ_AUTO_CLEANING_PERIOD:
buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
- /* fall through */
+ fallthrough;
case SPS30_READ_DATA_READY_FLAG:
case SPS30_READ_DATA:
case SPS30_READ_SERIAL:
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index cc4875660a69..1fd75c02a7cd 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -220,7 +220,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
break;
case CH_MODE_UNUSED:
- /* fall-through */
default:
switch (st->channel_offstate[i]) {
case CH_OFFSTATE_OUT_TRISTATE:
@@ -237,7 +236,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
break;
case CH_OFFSTATE_PULLDOWN:
- /* fall-through */
default:
pulldown |= BIT(i);
break;
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index b3835fb6b862..1a9609eda5c5 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -74,11 +74,12 @@ static int dpot_dac_read_raw(struct iio_dev *indio_dev,
case IIO_VAL_INT:
/*
* Convert integer scale to fractional scale by
- * setting the denominator (val2) to one, and...
+ * setting the denominator (val2) to one...
*/
*val2 = 1;
ret = IIO_VAL_FRACTIONAL;
- /* fall through */
+ /* ...and fall through. Say it again for GCC. */
+ fallthrough;
case IIO_VAL_FRACTIONAL:
*val *= regulator_get_voltage(dac->vref) / 1000;
*val2 *= dac->max_ohms;
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 9b47d9472a4f..d9b2ed80882a 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -273,10 +273,10 @@ static int max30102_read_measurement(struct max30102_data *data,
switch (measurements) {
case 3:
MAX30102_COPY_DATA(2);
- /* fall through */
+ fallthrough;
case 2:
MAX30102_COPY_DATA(1);
- /* fall through */
+ fallthrough;
case 1:
MAX30102_COPY_DATA(0);
break;
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index c539dfa3b8d3..319b64b2fd88 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -97,11 +97,11 @@ int __adis_write_reg(struct adis *adis, unsigned int reg,
adis->tx[9] = (value >> 24) & 0xff;
adis->tx[6] = ADIS_WRITE_REG(reg + 2);
adis->tx[7] = (value >> 16) & 0xff;
- /* fall through */
+ fallthrough;
case 2:
adis->tx[4] = ADIS_WRITE_REG(reg + 1);
adis->tx[5] = (value >> 8) & 0xff;
- /* fall through */
+ fallthrough;
case 1:
adis->tx[2] = ADIS_WRITE_REG(reg);
adis->tx[3] = value & 0xff;
@@ -191,7 +191,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
adis->tx[2] = ADIS_READ_REG(reg + 2);
adis->tx[3] = 0;
spi_message_add_tail(&xfers[1], &msg);
- /* fall through */
+ fallthrough;
case 2:
adis->tx[4] = ADIS_READ_REG(reg);
adis->tx[5] = 0;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 606d5e61c575..cdcd16f19500 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -599,7 +599,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
return scnprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
- /* fall through */
+ fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
@@ -918,7 +918,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
break;
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
- /* fall through */
+ fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
fract_mult = 100000;
break;
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 155faaea8c72..8f5f857c2e7d 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -1042,7 +1042,7 @@ static int si1145_initialize(struct si1145_data *data)
SI1145_LED_CURRENT_45mA);
if (ret < 0)
return ret;
- /* fallthrough */
+ fallthrough;
case 2:
ret = i2c_smbus_write_byte_data(client,
SI1145_REG_PS_LED21,
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 6a8ae145f0c0..cbb44e401c0a 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -499,7 +499,7 @@ static int ak8974_detect(struct ak8974 *ak8974)
switch (whoami) {
case AK8974_WHOAMI_VALUE_AMI306:
name = "ami306";
- /* fall-through */
+ fallthrough;
case AK8974_WHOAMI_VALUE_AMI305:
ret = regmap_read(ak8974->map, AMI305_VER, &fw);
if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dc0558b23158..fbc28f1a8b92 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3034,7 +3034,7 @@ static int cm_rej_handler(struct cm_work *work)
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ fallthrough;
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
@@ -3044,7 +3044,7 @@ static int cm_rej_handler(struct cm_work *work)
break;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ fallthrough;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_enter_timewait(cm_id_priv);
@@ -3058,7 +3058,7 @@ static int cm_rej_handler(struct cm_work *work)
cm_enter_timewait(cm_id_priv);
break;
}
- /* fall through */
+ fallthrough;
default:
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
@@ -3116,7 +3116,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
- /* fall through */
+ fallthrough;
default:
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
@@ -3227,7 +3227,7 @@ static int cm_mra_handler(struct cm_work *work)
case IB_CM_MRA_REP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
- /* fall through */
+ fallthrough;
default:
pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
@@ -4214,7 +4214,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_TGT:
*qp_attr_mask |= IB_QP_TIMEOUT;
qp_attr->timeout = cm_id_priv->av.timeout;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26de0dab60bb..7f0e91e92968 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1985,7 +1985,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IB_CM_DREQ_ERROR:
- event.status = -ETIMEDOUT; /* fall through */
+ event.status = -ETIMEDOUT;
+ fallthrough;
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ef0cd2998671..c36b4d2b61e0 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2751,7 +2751,7 @@ static int __init ib_core_init(void)
ret = addr_init();
if (ret) {
- pr_warn("Could't init IB address resolution\n");
+ pr_warn("Couldn't init IB address resolution\n");
goto err_ibnl;
}
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 614cff89fc71..13f43ab7220b 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -510,7 +510,6 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
switch (ctx->type) {
case RDMA_RW_SIG_MR:
case RDMA_RW_MR:
- /* fallthrough */
for (i = 0; i < ctx->nr_ops; i++) {
rdma_rw_update_lkey(&ctx->reg[i],
ctx->reg[i].wr.wr.opcode !=
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index d03dacaef788..1d184ea05eba 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -794,7 +794,7 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
@@ -820,7 +820,7 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 5e32f61a2fe4..cc6b4befde7c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* complex (and doesn't gain us much performance in most use
* cases).
*/
- npages = get_user_pages_remote(owning_process, owning_mm,
+ npages = get_user_pages_remote(owning_mm,
user_virt, gup_num_pages,
flags, local_page_list, NULL, NULL);
mmap_read_unlock(owning_mm);
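The umem_odp hunk tracks the mm API change that dropped the task_struct argument from get_user_pages_remote(); callers now identify the address space by mm alone. As far as this series shows, the prototype after that change is the following (check include/linux/mm.h in your tree):

long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);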
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index ef04a261097f..e47c5949013f 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -259,7 +259,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
return -EOPNOTSUPP;
e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
- /* fall through */
+ fallthrough;
case UVERBS_ATTR_TYPE_PTR_IN:
/* Ensure that any data provided by userspace beyond the known
* struct is zero. Userspace that knows how to use some future
@@ -271,7 +271,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
!uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
return -EOPNOTSUPP;
- /* fall through */
+ fallthrough;
case UVERBS_ATTR_TYPE_PTR_OUT:
if (uattr->len < val_spec->u.ptr.min_len ||
(!val_spec->zero_trailing &&
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 3f18efc0c297..5ee272d27aaa 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -2657,7 +2657,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
default:
break;
}
- /* fall through */
+ fallthrough;
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index dad0df8a2467..17ac8b7c5710 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -821,7 +821,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct ib_event event;
unsigned int flags;
- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 117b42349a28..d60e3dcea087 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1779,7 +1779,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
break;
}
- /* fall thru */
+ fallthrough;
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
{
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 77bc02a9228e..1f288c73ccfc 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2885,7 +2885,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case MORIBUND:
case CLOSING:
stop_ep_timer(ep);
- /*FALLTHROUGH*/
+ fallthrough;
case FPDU_MODE:
if (ep->com.qp && ep->com.qp->srq) {
srqidx = ABORT_RSS_SRQIDX_G(
@@ -3759,7 +3759,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
send_fw_act_open_req(ep, atid);
return;
}
- /* fall through */
+ fallthrough;
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ac48012c992f..cbddb20c6121 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1165,7 +1165,7 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
}
fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
- /*FALLTHROUGH*/
+ fallthrough;
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index b12e4665c9ab..4a4ec2397857 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -209,7 +209,6 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
fallthrough;
case 1:
*dest++ = *src++;
- /* fall through */
}
}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 9af82ff933d7..73d197e21730 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
switch (prev->wr.opcode) {
case IB_WR_TID_RDMA_WRITE:
req = wqe_to_tid_req(prev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index da9888deff8c..6edcbdcd8f43 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -65,8 +65,6 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_RESERVED_SGE 1
-
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 07b4c85d341d..aeb3a6fa7d47 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -535,7 +535,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
- dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
+	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
ext_sdb_alept, ext_sdb_alful);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d2968594664b..4cda95ed1fbe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
- if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
@@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
if (wr->num_sge < hr_qp->rq.max_gs) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
- dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
@@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (wr->num_sge < srq->max_gs) {
- dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
- dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].len = 0;
+ dseg[i].lkey = cpu_to_le32(0x100);
dseg[i].addr = 0;
}
@@ -5070,7 +5069,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
attr->srq_limit = limit_wl;
attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+ attr->max_sge = srq->max_gs;
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 1fb1c583d0f8..ac29be43b6bd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,9 +92,7 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x0
-#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
-
+#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e94ca130ff5e..c063c450c715 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
return -EINVAL;
}
- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
- HNS_ROCE_RESERVED_SGE);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+ cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index f40a000e94ee..b9e2dbd372b6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
spin_lock_init(&srq->lock);
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
+ srq->max_gs = init_attr->attr.max_sge;
if (udata) {
ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index fa7a5ff498c7..a3b95805c154 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2443,7 +2443,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
case I40IW_CM_STATE_FIN_WAIT1:
case I40IW_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
- /* fall through */
+ fallthrough;
case I40IW_CM_STATE_TIME_WAIT:
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 688f19667221..86d3f8aff329 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1964,7 +1964,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->out_rdrsp = true;
break;
case I40IW_AE_SOURCE_RSVD:
- /* fallthrough */
default:
break;
}
@@ -3762,14 +3761,14 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
set_64bit_val(wqe, 56, info->entry[2].data);
- /* fallthrough */
+ fallthrough;
case 2:
set_64bit_val(wqe, 32,
(LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
set_64bit_val(wqe, 40, info->entry[1].data);
- /* fallthrough */
+ fallthrough;
case 1:
set_64bit_val(wqe, 0,
LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index ae8b97c30665..e1085634b8d9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -353,7 +353,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_BAD_CLOSE:
- /* fall through */
case I40IW_AE_RESET_SENT:
i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
i40iw_cm_disconn(iwqp);
@@ -413,7 +412,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
- /* fall through */
+ fallthrough;
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9c96ece5e7f3..58a433135a03 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1489,36 +1489,35 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
iwdev->iw_status = 0;
i40iw_port_ibevent(iwdev);
i40iw_destroy_rdma_device(iwdev->iwibdev);
- /* fallthrough */
+ fallthrough;
case IP_ADDR_REGISTERED:
if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
- /* fallthrough */
- /* fallthrough */
+ fallthrough;
case PBLE_CHUNK_MEM:
i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
- /* fallthrough */
+ fallthrough;
case CEQ_CREATED:
i40iw_dele_ceqs(iwdev);
- /* fallthrough */
+ fallthrough;
case AEQ_CREATED:
i40iw_destroy_aeq(iwdev);
- /* fallthrough */
+ fallthrough;
case IEQ_CREATED:
i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case ILQ_CREATED:
i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case CCQ_CREATED:
i40iw_destroy_ccq(iwdev);
- /* fallthrough */
+ fallthrough;
case HMC_OBJS_CREATED:
i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case CQP_CREATED:
i40iw_destroy_cqp(iwdev, true);
- /* fallthrough */
+ fallthrough;
case INITIAL_STATE:
i40iw_cleanup_cm_core(&iwdev->cm_core);
if (iwdev->vsi.pestat) {
@@ -1528,7 +1527,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
i40iw_del_init_mem(iwdev);
break;
case INVALID_STATE:
- /* fallthrough */
default:
i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
break;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index d9c7ae6a7030..924be4b03c9a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -814,13 +814,13 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
- /* fall through */
+ fallthrough;
case PUDA_QP_CREATED:
if (!reset)
i40iw_puda_free_qp(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
- /* fallthrough */
+ fallthrough;
case PUDA_CQ_CREATED:
if (!reset)
i40iw_puda_free_cq(rsrc);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 016524683e17..e07fb37af086 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -190,9 +190,8 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
- /* Fall through */
case NETDEV_CHANGEADDR:
/* Just skip if no need to handle ARP cache */
@@ -247,9 +246,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
- /* Fall through */
case NETDEV_CHANGEADDR:
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
@@ -344,7 +342,7 @@ int i40iw_netdevice_event(struct notifier_block *notifier,
switch (event) {
case NETDEV_DOWN:
iwdev->iw_status = 0;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
i40iw_port_ibevent(iwdev);
break;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 6957e4f3404b..b51339328a51 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -810,7 +810,7 @@ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
case I40IW_QP_STATE_RTS:
if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
i40iw_send_reset(iwqp->cm_node);
- /* fall through */
+ fallthrough;
case I40IW_QP_STATE_IDLE:
case I40IW_QP_STATE_TERMINATE:
case I40IW_QP_STATE_CLOSING:
@@ -2144,7 +2144,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
switch (ib_wr->opcode) {
case IB_WR_SEND:
- /* fall-through */
case IB_WR_SEND_WITH_INV:
if (ib_wr->opcode == IB_WR_SEND) {
if (ib_wr->send_flags & IB_SEND_SOLICITED)
@@ -2201,7 +2200,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
- /* fall-through*/
+ fallthrough;
case IB_WR_RDMA_READ:
if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
err = -EINVAL;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index f8b936b76dcd..8a3436994f80 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -765,13 +765,13 @@ repoll:
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index d844831179cf..5e4ec9786081 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -944,7 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
- /* fall through */
+ fallthrough;
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f9ca6e000a81..2975f350b9fd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1578,12 +1578,12 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
pd = to_mxrcd(init_attr->xrcd)->pd;
xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_INI:
if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
return ERR_PTR(-ENOSYS);
init_attr->recv_cq = init_attr->send_cq;
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
@@ -1592,7 +1592,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- /* fall through */
+ fallthrough;
case IB_QPT_UD:
{
err = create_qp_common(pd, init_attr, udata, 0, &qp);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 0133ebb8d740..dceb0eb2bed1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -121,13 +121,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 454ce5de2de7..9bb9bb058932 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -250,9 +250,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
method == IB_MGMT_METHOD_GET)
return process_pma_cmd(dev, port_num, in, out);
- /* fallthrough */
+ fallthrough;
case MLX5_IB_VENDOR_CLASS1:
- /* fallthrough */
case MLX5_IB_VENDOR_CLASS2:
case IB_MGMT_CLASS_CONG_MGMT: {
if (method != IB_MGMT_METHOD_GET &&
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fbc45a5e76c5..d60d63221b14 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2872,7 +2872,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
break;
case MLX5_EVENT_TYPE_GENERAL_EVENT:
handle_general_event(ibdev, work->param, &ibev);
- /* fall through */
+ fallthrough;
default:
goto out;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 59fce5fac7a3..5758dbe64045 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -416,7 +416,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr)
switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
max(sizeof(struct mlx5_wqe_atomic_seg) +
@@ -441,7 +441,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr)
if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
size += sizeof(struct mlx5_wqe_eth_pad) +
sizeof(struct mlx5_wqe_eth_seg);
- /* fall through */
+ fallthrough;
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 0823c0bc7e73..f051f4e06b53 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
switch ((cur_rate - 1) / req_rate) {
case 0: return MTHCA_RATE_MEMFREE_FULL;
case 1: return MTHCA_RATE_MEMFREE_HALF;
- case 2: /* fall through */
+ case 2:
case 3: return MTHCA_RATE_MEMFREE_QUARTER;
default: return MTHCA_RATE_MEMFREE_EIGHTH;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6cdbec13756a..c1751c9a0f62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -2134,7 +2134,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
- /* fall through */
+ fallthrough;
case IB_WR_SEND:
hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
ocrdma_build_send(qp, hdr, wr);
@@ -2148,7 +2148,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
- /* fall through */
+ fallthrough;
case IB_WR_RDMA_WRITE:
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4ce4e2eef6cc..b49bef94637e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3528,7 +3528,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case IB_WR_RDMA_READ_WITH_INV:
SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
- /* fallthrough -- same is identical to RDMA READ */
+ fallthrough; /* identical to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index ca5ea734e3d0..44150be215bf 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2973,11 +2973,11 @@ static u32 qib_6120_iblink_state(u64 ibcs)
state = IB_PORT_ARMED;
break;
case IB_6120_L_STATE_ACTIVE:
- /* fall through */
case IB_6120_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_6120_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index ea3ddb05cbad..0a6f26d4cb31 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3586,11 +3586,11 @@ static u32 qib_7220_iblink_state(u64 ibcs)
state = IB_PORT_ARMED;
break;
case IB_7220_L_STATE_ACTIVE:
- /* fall through */
case IB_7220_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_7220_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 8bcbc884e5b6..a10eab89aee4 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5508,11 +5508,11 @@ static u32 qib_7322_iblink_state(u64 ibcs)
state = IB_PORT_ARMED;
break;
case IB_7322_L_STATE_ACTIVE:
- /* fall through */
case IB_7322_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_7322_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
@@ -6533,7 +6533,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
"Invalid num_vls %u, using 4 VLs\n",
qib_num_cfg_vls);
qib_num_cfg_vls = 4;
- /* fall through */
+ fallthrough;
case 4:
ppd->vls_supported = IB_VL_VL0_3;
break;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 79bb83222e8d..e7789e724f56 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -433,7 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
- /* fall through */
+ fallthrough;
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -828,7 +828,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
case IB_PORT_NOP:
if (lstate == 0)
break;
- /* FALLTHROUGH */
+ fallthrough;
case IB_PORT_DOWN:
if (lstate == 0)
lstate = QIB_IB_LINKDOWN_ONLY;
@@ -1928,7 +1928,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
smp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply(smp);
@@ -1962,7 +1962,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
smp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply(smp);
@@ -2322,7 +2322,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
ret = cc_get_congestion_control_table(ccp, ibdev, port);
goto bail;
- /* FALLTHROUGH */
+ fallthrough;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
@@ -2339,7 +2339,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
ret = cc_set_congestion_control_table(ccp, ibdev, port);
goto bail;
- /* FALLTHROUGH */
+ fallthrough;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index aaf7438258fa..3915e5b4a9bc 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -83,7 +83,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
- /* FALLTHROUGH */
+ fallthrough;
case OP(ATOMIC_ACKNOWLEDGE):
/*
* We can increment the tail pointer now that the last
@@ -92,7 +92,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
*/
if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
qp->s_tail_ack_queue = 0;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_ONLY):
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
@@ -149,7 +149,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
case OP(RDMA_READ_RESPONSE_FIRST):
qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_READ_RESPONSE_MIDDLE):
qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
@@ -471,10 +471,10 @@ no_flow_control:
* See qib_restart_rc().
*/
qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
ss = &qp->s_sge;
@@ -510,10 +510,10 @@ no_flow_control:
* See qib_restart_rc().
*/
qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
ss = &qp->s_sge;
@@ -1807,7 +1807,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
if (!ret)
goto rnr_nak;
qp->r_rcv_len = 0;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
case OP(RDMA_WRITE_MIDDLE):
send_middle:
@@ -1839,7 +1839,7 @@ send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
- /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
+ fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 99e11c347130..8f8d61736656 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -763,7 +763,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd,
* bringing the link up with traffic active on
* 7220, e.g. */
ss->go_s99_running = 1;
- /* fall through -- and start dma engine */
+ fallthrough; /* and start dma engine */
case qib_sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&ppd->sdma_state);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index e17b91e2c22a..554af4273a13 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -161,7 +161,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
@@ -185,7 +185,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
@@ -351,7 +351,7 @@ send_first:
goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
@@ -440,7 +440,7 @@ rdma_first:
wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
}
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7acf9ba5358a..f6c01bad5a74 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -237,7 +237,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
case IB_QPT_GSI:
if (ib_qib_disable_sma)
break;
- /* FALLTHROUGH */
+ fallthrough;
case IB_QPT_UD:
qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
break;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index c9abe1c01e4e..662e7fc7f628 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -120,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
IB_QPS_ERR,
NULL);
if (status) {
- usnic_err("Failed to transistion qp grp %u from %s to %s\n",
+ usnic_err("Failed to transition qp grp %u from %s to %s\n",
qp_grp->grp_id,
usnic_ib_qp_grp_state_to_string
(cur_state),
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index afcc2abcf55c..9a8f2a9507be 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -238,7 +238,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
ret = -EINVAL;
goto err_qp;
}
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UD:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 332a8ba94b81..ee48befc8978 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1111,7 +1111,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt)
return ERR_PTR(-EINVAL);
- /* fall through */
+ fallthrough;
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 4bc88708b355..7b4df0028388 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -282,7 +282,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
if ((syn & AETH_TYPE_MASK) != AETH_ACK)
return COMPST_ERROR;
- /* fall through */
+ fallthrough;
/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
*/
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 08f05ac5f5d5..ecdac3f8fcc9 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -71,7 +71,7 @@ void rxe_do_task(unsigned long data)
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
- /* fall through */
+ fallthrough;
case TASK_STATE_ARMED:
spin_unlock_irqrestore(&task->state_lock, flags);
return;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index bb61e534e468..658939e5c34a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -540,7 +540,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
wr->ex.imm_data = ibwr->ex.imm_data;
- /* fall through */
+ fallthrough;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 1662216be66d..66764f7ef072 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1224,12 +1224,10 @@ static void siw_cm_llp_data_ready(struct sock *sk)
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:
- /* fall through */
case SIW_EPSTATE_LISTENING:
break;
case SIW_EPSTATE_AWAIT_MPAREQ:
- /* fall through */
case SIW_EPSTATE_AWAIT_MPAREP:
siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
break;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 857be5a7d0bd..4bd1f1f84057 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -1215,7 +1215,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
case RDMAP_SEND_SE:
case RDMAP_SEND_SE_INVAL:
wqe->rqe.flags |= SIW_WQE_SOLICITED;
- /* Fall through */
+ fallthrough;
case RDMAP_SEND:
case RDMAP_SEND_INVAL:
@@ -1386,7 +1386,7 @@ int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
* DDP segment.
*/
qp->rx_fpdu->first_ddp_seg = 0;
- /* Fall through */
+ fallthrough;
case SIW_GET_DATA_START:
/*
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 9f53aa4feb87..d19d8325588b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1042,7 +1042,7 @@ next_wqe:
case SIW_OP_SEND_REMOTE_INV:
case SIW_OP_WRITE:
siw_wqe_put_mem(wqe, tx_type);
- /* Fall through */
+ fallthrough;
case SIW_OP_INVAL_STAG:
case SIW_OP_REG_MR:
@@ -1128,7 +1128,7 @@ next_wqe:
case SIW_OP_READ:
case SIW_OP_READ_LOCAL_INV:
siw_wqe_put_mem(wqe, tx_type);
- /* Fall through */
+ fallthrough;
case SIW_OP_INVAL_STAG:
case SIW_OP_REG_MR:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 9bf0fa30df28..7c41fb040f7c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -512,13 +512,13 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
ib_send_cm_drep(cm_id, NULL, 0);
- /* Fall through */
+ fallthrough;
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = ipoib_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
- /* Fall through */
+ fallthrough;
default:
return 0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 752581a8627b..ab75b7f745d4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -502,7 +502,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
default:
dev_warn_ratelimited(&dev->dev,
"duplicate IP address detected\n");
- /* Fall through */
+ fallthrough;
case 1:
return net_dev;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 699e075ae1b3..2f3ebc0a75d9 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -711,7 +711,7 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_REJECTED:
iser_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 61e2f7fc513d..e86acda3cf8c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -664,8 +664,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
- case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ret = isert_disconnected_handler(cma_id, event->event);
break;
@@ -684,7 +684,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
@@ -1470,7 +1470,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /* fall through */
+ fallthrough;
default:
iscsit_release_cmd(cmd);
break;
@@ -1648,7 +1648,7 @@ isert_do_control_comp(struct work_struct *work)
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- /* fall through */
+ fallthrough;
case ISTATE_SEND_REJECT:
case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 874a8eb7638c..4933085a864a 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -547,7 +547,6 @@ static void vema_get(struct opa_vnic_vema_port *port,
vema_get_mac_entries(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_IFACE_UCAST_MACS:
- /* fall through */
case OPA_EM_ATTR_IFACE_MCAST_MACS:
vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
break;
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index a81e14148407..f699538bdac4 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -16,7 +16,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
if (dev->absinfo && test_bit(src, dev->absbit)) {
dev->absinfo[dst] = dev->absinfo[src];
dev->absinfo[dst].fuzz = 0;
- dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
+ __set_bit(dst, dev->absbit);
}
}
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index a7bc576eb342..434d265fa2e8 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -247,7 +247,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
db9_saturn_write_sub(port, type, 3, powered, 0);
return data[0] = 0xe3;
}
- /* fall through */
+ fallthrough;
default:
return data[0];
}
@@ -267,14 +267,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
switch (data[j]) {
case 0x16: /* multi controller (analog 4 axis) */
input_report_abs(dev, db9_abs[5], data[j + 6]);
- /* fall through */
+ fallthrough;
case 0x15: /* mission stick (analog 3 axis) */
input_report_abs(dev, db9_abs[3], data[j + 4]);
input_report_abs(dev, db9_abs[4], data[j + 5]);
- /* fall through */
+ fallthrough;
case 0x13: /* racing controller (analog 1 axis) */
input_report_abs(dev, db9_abs[2], data[j + 3]);
- /* fall through */
+ fallthrough;
case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
case 0x02: /* digital pad (digital 2 axis + buttons) */
input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -368,7 +368,7 @@ static void db9_timer(struct timer_list *t)
input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
- /* fall through */
+ fallthrough;
case DB9_MULTI_0802:
diff --git a/drivers/input/joystick/fsia6b.c b/drivers/input/joystick/fsia6b.c
index e78c4c768990..76ffdec5c183 100644
--- a/drivers/input/joystick/fsia6b.c
+++ b/drivers/input/joystick/fsia6b.c
@@ -102,12 +102,12 @@ static irqreturn_t fsia6b_serio_irq(struct serio *serio,
input_report_key(fsia6b->dev,
sw_id++,
sw_state == 0);
- /* fall-through */
+ fallthrough;
case '2':
input_report_key(fsia6b->dev,
sw_id++,
sw_state == 1);
- /* fall-through */
+ fallthrough;
case '1':
input_report_key(fsia6b->dev,
sw_id++,
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index e0a362be5812..d37645e496ff 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -485,7 +485,7 @@ static void gc_multi_process_packet(struct gc *gc)
switch (pad->type) {
case GC_MULTI2:
input_report_key(dev, BTN_THUMB, s & data[5]);
- /* fall through */
+ fallthrough;
case GC_MULTI:
input_report_abs(dev, ABS_X,
@@ -638,7 +638,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type,
input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04);
input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02);
- /* fall through */
+ fallthrough;
case GC_PSX_NEGCON:
case GC_PSX_ANALOG:
@@ -872,7 +872,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_SNES:
for (i = 4; i < 8; i++)
input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
- /* fall through */
+ fallthrough;
+
case GC_NES:
for (i = 0; i < 4; i++)
input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
@@ -880,10 +881,10 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_MULTI2:
input_set_capability(input_dev, EV_KEY, BTN_THUMB);
- /* fall through */
+ fallthrough;
+
case GC_MULTI:
input_set_capability(input_dev, EV_KEY, BTN_TRIGGER);
- /* fall through */
break;
case GC_PSX:
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 1777e68c9f02..fac91ea14f17 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -656,16 +656,19 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
switch (i * m) {
case 60:
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 45: /* Ambiguous packet length */
if (j <= 40) { /* ID length less or eq 40 -> FSP */
case 43:
sw->type = SW_ID_FSP;
break;
}
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 30:
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 15:
sw->type = SW_ID_GP;
break;
@@ -681,9 +684,11 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->type = SW_ID_PP;
break;
case 66:
- sw->bits = 3; /* fall through */
+ sw->bits = 3;
+ fallthrough;
case 198:
- sw->length = 22; /* fall through */
+ sw->length = 22;
+ fallthrough;
case 64:
sw->type = SW_ID_3DP;
if (j == 160)
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index cf7cbcd0c29d..429411c6c0a8 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -146,7 +146,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
break;
}
spaceball->escape = 0;
- /* fall through */
+ fallthrough;
case 'M':
case 'Q':
case 'S':
@@ -154,7 +154,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
spaceball->escape = 0;
data &= 0x1f;
}
- /* fall through */
+ fallthrough;
default:
if (spaceball->escape)
spaceball->escape = 0;
@@ -220,13 +220,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) |
BIT_MASK(BTN_B) | BIT_MASK(BTN_C) |
BIT_MASK(BTN_MODE);
- /* fall through */
+ fallthrough;
default:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) |
BIT_MASK(BTN_3) | BIT_MASK(BTN_4) |
BIT_MASK(BTN_5) | BIT_MASK(BTN_6) |
BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
- /* fall through */
+ fallthrough;
case SPACEBALL_3003C:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) |
BIT_MASK(BTN_8);
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index e7d58e7f0257..eb0e9cd66bcb 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1016,7 +1016,7 @@ static int adp5589_probe(struct i2c_client *client,
switch (id->driver_data) {
case ADP5585_02:
kpad->support_row5 = true;
- /* fall through */
+ fallthrough;
case ADP5585_01:
kpad->is_adp5585 = true;
kpad->var = &const_adp5585;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6ec28265771d..edc613efc158 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1241,7 +1241,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
case SERIO_8042_XL:
atkbd->translated = true;
- /* Fall through */
+ fallthrough;
case SERIO_8042:
if (serio->write)
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 53c9ff338dea..f2d4e4daa818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -574,7 +574,6 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING;
break;
case EV_ACT_ANY:
- /* fall through */
default:
/*
* For other cases, we are OK letting suspend/resume
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 305f0160506a..8a36d78fed63 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(char *buffer,
{
pr_debug("%s()\n", __func__);
- return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
+ return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg);
}
static int ati_remote2_set_mode_mask(const char *val,
@@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(char *buffer,
{
pr_debug("%s()\n", __func__);
- return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
+ return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg);
}
static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index c09b9628ad34..e413801f0491 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -663,12 +663,8 @@ static const struct usb_device_id cm109_usb_table[] = {
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
kfree(dev->ctl_req);
- if (dev->ctl_data)
- usb_free_coherent(dev->udev, USB_PKT_LEN,
- dev->ctl_data, dev->ctl_dma);
- if (dev->irq_data)
- usb_free_coherent(dev->udev, USB_PKT_LEN,
- dev->irq_data, dev->irq_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma);
usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */
usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d8dbfc030d0f..08b9b5cdb943 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -335,7 +335,7 @@ static int ims_pcu_setup_gamepad(struct ims_pcu *pcu)
err_free_mem:
input_free_device(input);
kfree(gamepad);
- return -ENOMEM;
+ return error;
}
static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index 6699eb160a0f..a348247d3d38 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -575,8 +575,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
-
- /* fall through */
+ fallthrough;
case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
@@ -731,14 +730,12 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
iqs269->switches[i].code = val;
iqs269->switches[i].enabled = true;
}
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
break;
-
- /* fall through */
+ fallthrough;
default:
iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
@@ -1143,14 +1140,12 @@ static int iqs269_input_init(struct iqs269_private *iqs269)
sw_code,
state & BIT(j));
}
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
continue;
-
- /* fall through */
+ fallthrough;
default:
if (keycode != KEY_RESERVED)
@@ -1273,14 +1268,12 @@ static int iqs269_report(struct iqs269_private *iqs269)
input_report_switch(iqs269->keypad,
sw_code,
state & BIT(j));
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
continue;
-
- /* fall through */
+ fallthrough;
default:
input_report_key(iqs269->keypad, keycode,
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 8ceaf7db2882..81e777a04b88 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -190,7 +190,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Failed to request direction pwm: %d", err);
- /* Fall through */
+ fallthrough;
case -EPROBE_DEFER:
return err;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index a1bba722b234..4ff5cd2a6d8d 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -124,7 +124,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
switch (mtouch->event_type) {
case XENKBD_MT_EV_DOWN:
input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true);
- /* fall through */
+ fallthrough;
case XENKBD_MT_EV_MOTION:
input_report_abs(info->mtouch, ABS_MT_POSITION_X,
@@ -524,7 +524,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 34700eda0429..b067bfd2699c 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1929,7 +1929,7 @@ static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
static int alps_absolute_mode_v6(struct psmouse *psmouse)
{
u16 reg_val = 0x181;
- int ret = -1;
+ int ret;
/* enter monitor mode, to write the register */
if (alps_monitor_mode(psmouse, true))
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 3f06e8a495d8..bfa26651c0be 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -458,7 +458,7 @@ static int atp_status_check(struct urb *urb)
dev->info->datalen, dev->urb->actual_length);
dev->overflow_warned = true;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 00e395dfc3d5..a0361f9325f8 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -1067,7 +1067,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
return error;
}
- /* Fall through */
+ fallthrough;
case CYAPA_STATE_BL_IDLE:
/* Try to get firmware version in bootloader mode. */
cyapa_gen3_bl_query_data(cyapa);
@@ -1078,7 +1078,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
return error;
}
- /* Fall through */
+ fallthrough;
case CYAPA_STATE_OP:
/*
* Reading query data before going back to the full mode
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 7f012bfa2658..bb3a63d1268d 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2554,7 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN5_APP;
- /* fall through */
+ fallthrough;
case CYAPA_STATE_GEN5_APP:
/*
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index c1b524ab4623..7eba66fbef58 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -680,7 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN6_APP;
- /* fall through */
+ fallthrough;
case CYAPA_STATE_GEN6_APP:
/*
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index a9074ac9364f..c75b00c45d75 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -26,6 +26,8 @@
#define ETP_CALIBRATE_MAX_LEN 3
+#define ETP_FEATURE_REPORT_MK BIT(0)
+
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
@@ -33,6 +35,8 @@
#define ETP_FW_IAP_PAGE_ERR (1 << 5)
#define ETP_FW_IAP_INTF_ERR (1 << 4)
#define ETP_FW_PAGE_SIZE 64
+#define ETP_FW_PAGE_SIZE_128 128
+#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
struct i2c_client;
@@ -55,8 +59,9 @@ struct elan_transport_ops {
int (*get_baseline_data)(struct i2c_client *client,
bool max_baseliune, u8 *value);
- int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
- int (*get_sm_version)(struct i2c_client *client,
+ int (*get_version)(struct i2c_client *client, u8 pattern, bool iap,
+ u8 *version);
+ int (*get_sm_version)(struct i2c_client *client, u8 pattern,
u16 *ic_type, u8 *version, u8 *clickpad);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
int (*get_product_id)(struct i2c_client *client, u16 *id);
@@ -72,13 +77,18 @@ struct elan_transport_ops {
int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
int (*iap_reset)(struct i2c_client *client);
- int (*prepare_fw_update)(struct i2c_client *client);
- int (*write_fw_block)(struct i2c_client *client,
+ int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type,
+ u8 iap_version);
+ int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx);
int (*finish_fw_update)(struct i2c_client *client,
struct completion *reset_done);
- int (*get_report)(struct i2c_client *client, u8 *report);
+ int (*get_report_features)(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len);
+ int (*get_report)(struct i2c_client *client, u8 *report,
+ unsigned int report_len);
int (*get_pressure_adjustment)(struct i2c_client *client,
int *adjustment);
int (*get_pattern)(struct i2c_client *client, u8 *pattern);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 6291fb5fa015..c599e21a8478 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -50,12 +50,14 @@
#define ETP_MAX_FINGERS 5
#define ETP_FINGER_DATA_LEN 5
#define ETP_REPORT_ID 0x5D
+#define ETP_REPORT_ID2 0x60 /* High precision report */
#define ETP_TP_REPORT_ID 0x5E
#define ETP_REPORT_ID_OFFSET 2
#define ETP_TOUCH_INFO_OFFSET 3
#define ETP_FINGER_DATA_OFFSET 4
#define ETP_HOVER_INFO_OFFSET 30
-#define ETP_MAX_REPORT_LEN 34
+#define ETP_MK_DATA_OFFSET 33 /* For high precision reports */
+#define ETP_MAX_REPORT_LEN 39
/* The main device structure */
struct elan_tp_data {
@@ -85,11 +87,14 @@ struct elan_tp_data {
u8 sm_version;
u8 iap_version;
u16 fw_checksum;
+ unsigned int report_features;
+ unsigned int report_len;
int pressure_adjustment;
u8 mode;
u16 ic_type;
u16 fw_validpage_count;
- u16 fw_signature_address;
+ u16 fw_page_size;
+ u32 fw_signature_address;
bool irq_wake;
@@ -100,8 +105,8 @@ struct elan_tp_data {
bool middle_button;
};
-static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
- u16 *signature_address)
+static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
+ u32 *signature_address, u16 *page_size)
{
switch (ic_type) {
case 0x00:
@@ -126,16 +131,37 @@ static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
case 0x10:
*validpage_count = 1024;
break;
+ case 0x11:
+ *validpage_count = 1280;
+ break;
+ case 0x13:
+ *validpage_count = 2048;
+ break;
+ case 0x14:
+ case 0x15:
+ *validpage_count = 1024;
+ break;
default:
/* unknown ic type clear value */
*validpage_count = 0;
*signature_address = 0;
+ *page_size = 0;
return -ENXIO;
}
*signature_address =
(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+ if ((ic_type == 0x14 || ic_type == 0x15) && iap_version >= 2) {
+ *validpage_count /= 8;
+ *page_size = ETP_FW_PAGE_SIZE_512;
+ } else if (ic_type >= 0x0D && iap_version >= 1) {
+ *validpage_count /= 2;
+ *page_size = ETP_FW_PAGE_SIZE_128;
+ } else {
+ *page_size = ETP_FW_PAGE_SIZE;
+ }
+
return 0;
}
@@ -215,8 +241,13 @@ static int elan_query_product(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version, &data->clickpad);
+ error = data->ops->get_pattern(data->client, &data->pattern);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, data->pattern,
+ &data->ic_type, &data->sm_version,
+ &data->clickpad);
if (error)
return error;
@@ -312,9 +343,9 @@ static int elan_initialize(struct elan_tp_data *data)
static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
- u16 ic_type;
- error = data->ops->get_version(data->client, false, &data->fw_version);
+ error = data->ops->get_version(data->client, data->pattern, false,
+ &data->fw_version);
if (error)
return error;
@@ -323,7 +354,8 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_version(data->client, true, &data->iap_version);
+ error = data->ops->get_version(data->client, data->pattern,
+ true, &data->iap_version);
if (error)
return error;
@@ -332,17 +364,16 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_pattern(data->client, &data->pattern);
+ error = data->ops->get_report_features(data->client, data->pattern,
+ &data->report_features,
+ &data->report_len);
if (error)
return error;
- if (data->pattern == 0x01)
- ic_type = data->ic_type;
- else
- ic_type = data->iap_version;
-
- error = elan_get_fwinfo(ic_type, &data->fw_validpage_count,
- &data->fw_signature_address);
+ error = elan_get_fwinfo(data->ic_type, data->iap_version,
+ &data->fw_validpage_count,
+ &data->fw_signature_address,
+ &data->fw_page_size);
if (error)
dev_warn(&data->client->dev,
"unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
@@ -351,16 +382,21 @@ static int elan_query_device_info(struct elan_tp_data *data)
return 0;
}
-static unsigned int elan_convert_resolution(u8 val)
+static unsigned int elan_convert_resolution(u8 val, u8 pattern)
{
/*
- * (value from firmware) * 10 + 790 = dpi
- *
+ * pattern <= 0x01:
+ * (value from firmware) * 10 + 790 = dpi
+ * else
+ * ((value from firmware) + 3) * 100 = dpi
+ */
+ int res = pattern <= 0x01 ?
+ (int)(char)val * 10 + 790 : ((int)(char)val + 3) * 100;
+ /*
* We also have to convert dpi to dots/mm (*10/254 to avoid floating
* point).
*/
-
- return ((int)(char)val * 10 + 790) * 10 / 254;
+ return res * 10 / 254;
}
static int elan_query_device_parameters(struct elan_tp_data *data)
@@ -409,8 +445,8 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
if (error)
return error;
- data->x_res = elan_convert_resolution(hw_x_res);
- data->y_res = elan_convert_resolution(hw_y_res);
+ data->x_res = elan_convert_resolution(hw_x_res, data->pattern);
+ data->y_res = elan_convert_resolution(hw_y_res, data->pattern);
} else {
data->x_res = (data->max_x + 1) / x_mm;
data->y_res = (data->max_y + 1) / y_mm;
@@ -430,14 +466,14 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
* IAP firmware updater related routines
**********************************************************
*/
-static int elan_write_fw_block(struct elan_tp_data *data,
+static int elan_write_fw_block(struct elan_tp_data *data, u16 page_size,
const u8 *page, u16 checksum, int idx)
{
int retry = ETP_RETRY_COUNT;
int error;
do {
- error = data->ops->write_fw_block(data->client,
+ error = data->ops->write_fw_block(data->client, page_size,
page, checksum, idx);
if (!error)
return 0;
@@ -460,21 +496,23 @@ static int __elan_update_firmware(struct elan_tp_data *data,
u16 boot_page_count;
u16 sw_checksum = 0, fw_checksum = 0;
- error = data->ops->prepare_fw_update(client);
+ error = data->ops->prepare_fw_update(client, data->ic_type,
+ data->iap_version);
if (error)
return error;
iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
- boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+ boot_page_count = (iap_start_addr * 2) / data->fw_page_size;
for (i = boot_page_count; i < data->fw_validpage_count; i++) {
u16 checksum = 0;
- const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+ const u8 *page = &fw->data[i * data->fw_page_size];
- for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+ for (j = 0; j < data->fw_page_size; j += 2)
checksum += ((page[j + 1] << 8) | page[j]);
- error = elan_write_fw_block(data, page, checksum, i);
+ error = elan_write_fw_block(data, data->fw_page_size,
+ page, checksum, i);
if (error) {
dev_err(dev, "write page %d fail: %d\n", i, error);
return error;
@@ -886,24 +924,22 @@ static const struct attribute_group *elan_sysfs_groups[] = {
* Elan isr functions
******************************************************************
*/
-static void elan_report_contact(struct elan_tp_data *data,
- int contact_num, bool contact_valid,
- u8 *finger_data)
+static void elan_report_contact(struct elan_tp_data *data, int contact_num,
+ bool contact_valid, bool high_precision,
+ u8 *packet, u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
- unsigned int pressure, mk_x, mk_y;
- unsigned int area_x, area_y, major, minor;
- unsigned int scaled_pressure;
+ unsigned int pressure, scaled_pressure;
if (contact_valid) {
- pos_x = ((finger_data[0] & 0xf0) << 4) |
- finger_data[1];
- pos_y = ((finger_data[0] & 0x0f) << 8) |
- finger_data[2];
- mk_x = (finger_data[3] & 0x0f);
- mk_y = (finger_data[3] >> 4);
- pressure = finger_data[4];
+ if (high_precision) {
+ pos_x = get_unaligned_be16(&finger_data[0]);
+ pos_y = get_unaligned_be16(&finger_data[2]);
+ } else {
+ pos_x = ((finger_data[0] & 0xf0) << 4) | finger_data[1];
+ pos_y = ((finger_data[0] & 0x0f) << 8) | finger_data[2];
+ }
if (pos_x > data->max_x || pos_y > data->max_y) {
dev_dbg(input->dev.parent,
@@ -913,18 +949,8 @@ static void elan_report_contact(struct elan_tp_data *data,
return;
}
- /*
- * To avoid treating large finger as palm, let's reduce the
- * width x and y per trace.
- */
- area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
- area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
-
- major = max(area_x, area_y);
- minor = min(area_x, area_y);
-
+ pressure = finger_data[4];
scaled_pressure = pressure + data->pressure_adjustment;
-
if (scaled_pressure > ETP_MAX_PRESSURE)
scaled_pressure = ETP_MAX_PRESSURE;
@@ -933,16 +959,37 @@ static void elan_report_contact(struct elan_tp_data *data,
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
- input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
- input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+
+ if (data->report_features & ETP_FEATURE_REPORT_MK) {
+ unsigned int mk_x, mk_y, area_x, area_y;
+ u8 mk_data = high_precision ?
+ packet[ETP_MK_DATA_OFFSET + contact_num] :
+ finger_data[3];
+
+ mk_x = mk_data & 0x0f;
+ mk_y = mk_data >> 4;
+
+ /*
+ * To avoid treating large finger as palm, let's reduce
+ * the width x and y per trace.
+ */
+ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+ area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+ input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ max(area_x, area_y));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ min(area_x, area_y));
+ }
} else {
input_mt_slot(input, contact_num);
input_mt_report_slot_inactive(input);
}
}
-static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet,
+ bool high_precision)
{
struct input_dev *input = data->input;
u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
@@ -953,11 +1000,12 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
pm_wakeup_event(&data->client->dev, 0);
- hover_event = hover_info & 0x40;
- for (i = 0; i < ETP_MAX_FINGERS; i++) {
- contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, finger_data);
+ hover_event = hover_info & BIT(6);
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ contact_valid = tp_info & BIT(3 + i);
+ elan_report_contact(data, i, contact_valid, high_precision,
+ packet, finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
}
@@ -1015,13 +1063,16 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
goto out;
}
- error = data->ops->get_report(data->client, report);
+ error = data->ops->get_report(data->client, report, data->report_len);
if (error)
goto out;
switch (report[ETP_REPORT_ID_OFFSET]) {
case ETP_REPORT_ID:
- elan_report_absolute(data, report);
+ elan_report_absolute(data, report, false);
+ break;
+ case ETP_REPORT_ID2:
+ elan_report_absolute(data, report, true);
break;
case ETP_TP_REPORT_ID:
elan_report_trackpoint(data, report);
@@ -1112,7 +1163,9 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_X, data->x_res);
input_abs_set_res(input, ABS_Y, data->y_res);
input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
- input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+ if (data->report_features & ETP_FEATURE_REPORT_MK)
+ input_set_abs_params(input, ABS_TOOL_WIDTH,
+ 0, ETP_FINGER_WIDTH, 0, 0);
input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
/* And MT parameters */
@@ -1122,10 +1175,12 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
input_set_abs_params(input, ABS_MT_PRESSURE, 0,
ETP_MAX_PRESSURE, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
- ETP_FINGER_WIDTH * max_width, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
- ETP_FINGER_WIDTH * min_width, 0, 0);
+ if (data->report_features & ETP_FEATURE_REPORT_MK) {
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ 0, ETP_FINGER_WIDTH * max_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ 0, ETP_FINGER_WIDTH * min_width, 0, 0);
+ }
data->input = input;
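The elan_convert_resolution() hunk above now keys the dpi formula on the reported pattern, per the comment in the diff. A standalone restatement of that conversion, with sample numbers that are purely illustrative (the driver casts through plain char; signed char is used here to make the sign handling explicit):

static unsigned int convert_resolution(unsigned char val, unsigned char pattern)
{
	int dpi = pattern <= 0x01 ? (int)(signed char)val * 10 + 790
				  : ((int)(signed char)val + 3) * 100;

	return dpi * 10 / 254;	/* 1 inch = 25.4 mm; *10/254 avoids floating point */
}

/* e.g. val = 0x10, pattern 0: dpi = 16*10 + 790 = 950 -> 950*10/254 = 37 dots/mm
 *      val = 0x02, pattern 2: dpi = (2+3)*100 = 500  -> 500*10/254 = 19 dots/mm
 */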
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 058b35b1f9a9..5a496d4ffa49 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
@@ -43,6 +44,8 @@
#define ETP_I2C_RESOLUTION_CMD 0x0108
#define ETP_I2C_PRESSURE_CMD 0x010A
#define ETP_I2C_IAP_VERSION_CMD 0x0110
+#define ETP_I2C_IC_TYPE_P0_CMD 0x0110
+#define ETP_I2C_IAP_VERSION_P0_CMD 0x0111
#define ETP_I2C_SET_CMD 0x0300
#define ETP_I2C_POWER_CMD 0x0307
#define ETP_I2C_FW_CHECKSUM_CMD 0x030F
@@ -53,8 +56,12 @@
#define ETP_I2C_CALIBRATE_CMD 0x0316
#define ETP_I2C_MAX_BASELINE_CMD 0x0317
#define ETP_I2C_MIN_BASELINE_CMD 0x0318
+#define ETP_I2C_IAP_TYPE_REG 0x0040
+#define ETP_I2C_IAP_TYPE_CMD 0x0304
#define ETP_I2C_REPORT_LEN 34
+#define ETP_I2C_REPORT_LEN_ID2 39
+#define ETP_I2C_REPORT_MAX_LEN 39
#define ETP_I2C_DESC_LENGTH 30
#define ETP_I2C_REPORT_DESC_LENGTH 158
#define ETP_I2C_INF_LENGTH 2
@@ -249,56 +256,52 @@ static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern)
dev_err(&client->dev, "failed to get pattern: %d\n", error);
return error;
}
- *pattern = val[1];
+
+ /*
+ * Not all versions of firmware implement the "get pattern" command.
+ * When this command is not implemented, the device will respond
+ * with 0xFF 0xFF, which we will treat as "old" pattern 0.
+ */
+ *pattern = val[0] == 0xFF && val[1] == 0xFF ? 0 : val[1];
return 0;
}
static int elan_i2c_get_version(struct i2c_client *client,
- bool iap, u8 *version)
+ u8 pattern, bool iap, u8 *version)
{
int error;
- u8 pattern_ver;
+ u16 cmd;
u8 val[3];
- error = elan_i2c_get_pattern(client, &pattern_ver);
- if (error) {
- dev_err(&client->dev, "failed to get pattern version\n");
- return error;
- }
+ if (!iap)
+ cmd = ETP_I2C_FW_VERSION_CMD;
+ else if (pattern == 0)
+ cmd = ETP_I2C_IAP_VERSION_P0_CMD;
+ else
+ cmd = ETP_I2C_IAP_VERSION_CMD;
- error = elan_i2c_read_cmd(client,
- iap ? ETP_I2C_IAP_VERSION_CMD :
- ETP_I2C_FW_VERSION_CMD,
- val);
+ error = elan_i2c_read_cmd(client, cmd, val);
if (error) {
dev_err(&client->dev, "failed to get %s version: %d\n",
iap ? "IAP" : "FW", error);
return error;
}
- if (pattern_ver == 0x01)
+ if (pattern >= 0x01)
*version = iap ? val[1] : val[0];
else
*version = val[0];
return 0;
}
-static int elan_i2c_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version,
- u8 *clickpad)
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 pattern,
+ u16 *ic_type, u8 *version, u8 *clickpad)
{
int error;
- u8 pattern_ver;
u8 val[3];
- error = elan_i2c_get_pattern(client, &pattern_ver);
- if (error) {
- dev_err(&client->dev, "failed to get pattern version\n");
- return error;
- }
-
- if (pattern_ver == 0x01) {
+ if (pattern >= 0x01) {
error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val);
if (error) {
dev_err(&client->dev, "failed to get ic type: %d\n",
@@ -324,7 +327,14 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
return error;
}
*version = val[0];
- *ic_type = val[1];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_P0_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get ic type: %d\n",
+ error);
+ return error;
+ }
+ *ic_type = val[0];
error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD,
val);
@@ -386,7 +396,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
return error;
}
- *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+ *max_x = le16_to_cpup((__le16 *)val);
error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
if (error) {
@@ -394,7 +404,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
return error;
}
- *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+ *max_y = le16_to_cpup((__le16 *)val);
return 0;
}
@@ -507,7 +517,43 @@ static int elan_i2c_set_flash_key(struct i2c_client *client)
return 0;
}
-static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+static int elan_read_write_iap_type(struct i2c_client *client)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+ int retry = 3;
+
+ do {
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD,
+ ETP_I2C_IAP_TYPE_REG);
+ if (error) {
+ dev_err(&client->dev,
+ "cannot write iap type: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_TYPE_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read iap type register: %d\n",
+ error);
+ return error;
+ }
+ constant = le16_to_cpup((__le16 *)val);
+ dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant);
+
+ if (constant == ETP_I2C_IAP_TYPE_REG)
+ return 0;
+
+ } while (--retry > 0);
+
+ dev_err(&client->dev, "cannot set iap type\n");
+ return -EIO;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+ u8 iap_version)
{
struct device *dev = &client->dev;
int error;
@@ -547,6 +593,12 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
return -EIO;
}
+ if (ic_type >= 0x0D && iap_version >= 1) {
+ error = elan_read_write_iap_type(client);
+ if (error)
+ return error;
+ }
+
/* Set flash key again */
error = elan_i2c_set_flash_key(client);
if (error)
@@ -572,57 +624,64 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
return 0;
}
-static int elan_i2c_write_fw_block(struct i2c_client *client,
+static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx)
{
struct device *dev = &client->dev;
- u8 page_store[ETP_FW_PAGE_SIZE + 4];
+ u8 *page_store;
u8 val[3];
u16 result;
int ret, error;
+ page_store = kmalloc(fw_page_size + 4, GFP_KERNEL);
+ if (!page_store)
+ return -ENOMEM;
+
page_store[0] = ETP_I2C_IAP_REG_L;
page_store[1] = ETP_I2C_IAP_REG_H;
- memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+ memcpy(&page_store[2], page, fw_page_size);
/* recode checksum at last two bytes */
- put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+ put_unaligned_le16(checksum, &page_store[fw_page_size + 2]);
- ret = i2c_master_send(client, page_store, sizeof(page_store));
- if (ret != sizeof(page_store)) {
+ ret = i2c_master_send(client, page_store, fw_page_size + 4);
+ if (ret != fw_page_size + 4) {
error = ret < 0 ? ret : -EIO;
dev_err(dev, "Failed to write page %d: %d\n", idx, error);
- return error;
+ goto exit;
}
/* Wait for F/W to update one page ROM data. */
- msleep(35);
+ msleep(fw_page_size == ETP_FW_PAGE_SIZE_512 ? 50 : 35);
error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
if (error) {
dev_err(dev, "Failed to read IAP write result: %d\n", error);
- return error;
+ goto exit;
}
result = le16_to_cpup((__le16 *)val);
if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
dev_err(dev, "IAP reports failed write: %04hx\n",
result);
- return -EIO;
+ error = -EIO;
+ goto exit;
}
- return 0;
+exit:
+ kfree(page_store);
+ return error;
}
static int elan_i2c_finish_fw_update(struct i2c_client *client,
struct completion *completion)
{
struct device *dev = &client->dev;
- int error;
+ int error = 0;
int len;
- u8 buffer[ETP_I2C_REPORT_LEN];
+ u8 buffer[ETP_I2C_REPORT_MAX_LEN];
- len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
- if (len != ETP_I2C_REPORT_LEN) {
+ len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_MAX_LEN);
+ if (len <= 0) {
error = len < 0 ? len : -EIO;
dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
error, len);
@@ -656,20 +715,31 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
return 0;
}
-static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+static int elan_i2c_get_report_features(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len)
+{
+ *features = ETP_FEATURE_REPORT_MK;
+ *report_len = pattern <= 0x01 ?
+ ETP_I2C_REPORT_LEN : ETP_I2C_REPORT_LEN_ID2;
+ return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client,
+ u8 *report, unsigned int report_len)
{
int len;
- len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+ len = i2c_master_recv(client, report, report_len);
if (len < 0) {
dev_err(&client->dev, "failed to read report data: %d\n", len);
return len;
}
- if (len != ETP_I2C_REPORT_LEN) {
+ if (len != report_len) {
dev_err(&client->dev,
"wrong report length (%d vs %d expected)\n",
- len, ETP_I2C_REPORT_LEN);
+ len, report_len);
return -EIO;
}
@@ -706,5 +776,6 @@ const struct elan_transport_ops elan_i2c_ops = {
.get_pattern = elan_i2c_get_pattern,
+ .get_report_features = elan_i2c_get_report_features,
.get_report = elan_i2c_get_report,
};
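A minimal sketch of how the core driver could consume the new get_report_features() hook before sizing its report reads. Only the transport-op signature comes from the patch above; the helper name and the struct elan_tp_data fields used below are assumptions for illustration.

    static int elan_query_report_features(struct elan_tp_data *data)
    {
            unsigned int features, report_len;
            int error;

            /* Ask the transport which packet format this pattern uses. */
            error = data->ops->get_report_features(data->client, data->pattern,
                                                   &features, &report_len);
            if (error)
                    return error;

            data->report_features = features;   /* assumed field */
            data->report_len = report_len;      /* assumed field */
            return 0;
    }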
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 8c3185d54c73..8ff823751f3b 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -147,7 +147,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
}
static int elan_smbus_get_version(struct i2c_client *client,
- bool iap, u8 *version)
+ u8 pattern, bool iap, u8 *version)
{
int error;
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -166,9 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version,
- u8 *clickpad)
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 pattern,
+ u16 *ic_type, u8 *version, u8 *clickpad)
{
int error;
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -340,7 +339,8 @@ static int elan_smbus_set_flash_key(struct i2c_client *client)
return 0;
}
-static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+ u8 iap_version)
{
struct device *dev = &client->dev;
int len;
@@ -414,7 +414,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
}
-static int elan_smbus_write_fw_block(struct i2c_client *client,
+static int elan_smbus_write_fw_block(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx)
{
struct device *dev = &client->dev;
@@ -429,7 +429,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
*/
error = i2c_smbus_write_block_data(client,
ETP_SMBUS_WRITE_FW_BLOCK,
- ETP_FW_PAGE_SIZE / 2,
+ fw_page_size / 2,
page);
if (error) {
dev_err(dev, "Failed to write page %d (part %d): %d\n",
@@ -439,8 +439,8 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
error = i2c_smbus_write_block_data(client,
ETP_SMBUS_WRITE_FW_BLOCK,
- ETP_FW_PAGE_SIZE / 2,
- page + ETP_FW_PAGE_SIZE / 2);
+ fw_page_size / 2,
+ page + fw_page_size / 2);
if (error) {
dev_err(dev, "Failed to write page %d (part %d): %d\n",
idx, 2, error);
@@ -469,7 +469,21 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+static int elan_smbus_get_report_features(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len)
+{
+ /*
+ * SMBus controllers with pattern 2 lack area info, as newer
+ * high-precision packets use that space for coordinates.
+ */
+ *features = pattern <= 0x01 ? ETP_FEATURE_REPORT_MK : 0;
+ *report_len = ETP_SMBUS_REPORT_LEN;
+ return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client,
+ u8 *report, unsigned int report_len)
{
int len;
@@ -534,6 +548,7 @@ const struct elan_transport_ops elan_smbus_ops = {
.write_fw_block = elan_smbus_write_fw_block,
.finish_fw_update = elan_smbus_finish_fw_update,
+ .get_report_features = elan_smbus_get_report_features,
.get_report = elan_smbus_get_report,
.get_pattern = elan_smbus_get_pattern,
};
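A sketch of how a firmware-update loop might drive the reworked write_fw_block() now that the page size is a parameter. The iteration and checksum code below are illustrative only; the op signature is the one introduced above.

    static int elan_write_fw_pages(struct elan_tp_data *data,
                                   const struct firmware *fw, u16 fw_page_size)
    {
            int i, j, error;

            for (i = 0; i < fw->size / fw_page_size; i++) {
                    const u8 *page = &fw->data[i * fw_page_size];
                    u16 checksum = 0;

                    /* Checksum is computed over 16-bit words of the page. */
                    for (j = 0; j < fw_page_size; j += 2)
                            checksum += ((page[j + 1] << 8) | page[j]);

                    error = data->ops->write_fw_block(data->client, fw_page_size,
                                                      page, checksum, i);
                    if (error)
                            return error;
            }
            return 0;
    }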
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d8434b7b623..90f8765f9efc 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -383,7 +383,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
*/
if (packet[3] & 0x80)
fingers = 4;
- /* fall through */
+ fallthrough;
case 1:
/*
* byte 1: . . . . x11 x10 x9 x8
@@ -1146,7 +1146,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
case 2:
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
- /* fall through */
+ fallthrough;
case 3:
if (info->hw_version == 3)
elantech_set_buttonpad_prop(psmouse);
@@ -1877,12 +1877,10 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
/* expected case */
break;
case ETP_BUS_SMB_ALERT_ONLY:
- /* fall-through */
case ETP_BUS_PS2_SMB_ALERT:
psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
break;
case ETP_BUS_SMB_HST_NTFY_ONLY:
- /* fall-through */
case ETP_BUS_PS2_SMB_HST_NTFY:
return true;
default:
@@ -1897,7 +1895,7 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
@@ -2015,7 +2013,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
int elantech_init_ps2(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
@@ -2036,7 +2034,7 @@ int elantech_init_ps2(struct psmouse *psmouse)
int elantech_init(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 72a083f3fc4a..4dc441309aac 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -238,7 +238,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
/* we're not spewing, but this packet might be the start */
priv->spew_flag = MAYBE_SPEWING;
- /* fall-through */
+ fallthrough;
case MAYBE_SPEWING:
priv->spew_count++;
@@ -249,7 +249,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
/* excessive spew detected, request recalibration */
priv->spew_flag = SPEW_DETECTED;
- /* fall-through */
+ fallthrough;
case SPEW_DETECTED:
/* only recalibrate when the overall delta to the cursor
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 0b75248c8380..c112980c2341 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -105,7 +105,7 @@ static void navpoint_packet(struct navpoint *navpoint)
case 0x19: /* Module 0, Hello packet */
if ((navpoint->data[1] & 0xf0) == 0x10)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
dev_warn(navpoint->dev,
"spurious packet: data=0x%02x,0x%02x,...\n",
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 527ae0b9a191..0b4a3039f312 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
+ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index e99d9bf1a267..2716d2ba386a 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
fsp_reg_write_enable(psmouse, false);
- return count;
+ return retval;
}
PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
@@ -794,7 +794,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
/* on-pad click, filter it if necessary */
if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC)
packet[0] &= ~FSP_PB0_LBTN;
- /* fall through */
+ fallthrough;
case FSP_PKT_TYPE_NORMAL:
/* normal packet */
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index ea9242d53899..caa79c177c55 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -128,7 +128,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MS:
sermouse->type = SERIO_MP;
- /* fall through */
+ fallthrough;
case SERIO_MP:
if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
@@ -139,7 +139,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MZP:
case SERIO_MZPP:
input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
- /* fall through */
+ fallthrough;
case SERIO_MZ:
input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index da0bf85321de..64590b86eb37 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -21,8 +21,6 @@
#elif defined(__arm__)
/* defined in include/asm-arm/arch-xxx/irqs.h */
#include <asm/irq.h>
-#elif defined(CONFIG_SH_CAYMAN)
-#include <asm/irq.h>
#elif defined(CONFIG_PPC)
extern int of_i8042_kbd_irq;
extern int of_i8042_aux_irq;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 0dddf273afd9..d3eda48032e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -562,7 +562,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
str = last_str;
break;
}
- /* fall through - report timeout */
+ fallthrough; /* report timeout */
case 0xfc:
case 0xfd:
case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index a8c94a940a79..8a16e41f7b7f 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -418,7 +418,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data)
ps2dev->nak = 0;
break;
}
- /* Fall through */
+ fallthrough;
default:
/*
* Do not signal errors if we get unexpected reply while
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 530fd15eaeca..25bf8be6e711 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -247,7 +247,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k
case KE_SW:
value = ke->sw.value;
- /* fall through */
+ fallthrough;
case KE_VSW:
input_report_switch(dev, ke->sw.code, value);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 96d65575f75a..44bb1f69b4b2 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -676,8 +676,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
/* Mask out the Y tilt value used for pressure */
device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);
+ fallthrough;
- /* Fall thru */
case 4:
/* Tilt */
input_report_abs(inputdev, ABS_TILT_X,
@@ -685,8 +685,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
input_report_abs(inputdev, ABS_TILT_Y,
sign_extend32(device->buffer[7], 6));
+ fallthrough;
- /* Fall thru */
case 2:
case 3:
/* Convert buttons, only 5 bits possible */
@@ -695,8 +695,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
/* We don't apply any meaning to the bitmask,
just report */
input_event(inputdev, EV_MSC, MSC_SERIAL, val);
+ fallthrough;
- /* Fall thru */
case 1:
/* All reports have X and Y coords in the same place */
val = get_unaligned_le16(&device->buffer[1]);
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 38f087404f7a..749edbdb7ffa 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -146,7 +146,7 @@ static void pegasus_parse_packet(struct pegasus *pegasus)
/* xy data */
case BATTERY_LOW:
dev_warn_once(&dev->dev, "Pen battery low\n");
- /* fall through */
+ fallthrough;
case BATTERY_NO_REPORT:
case BATTERY_GOOD:
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 959c1d82aa66..1cedb45ba497 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -213,7 +213,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
case 0x3731: /* PL-710 */
wacom->res_x = 2540;
wacom->res_y = 2540;
- /* fall through */
+ fallthrough;
case 0x3535: /* PL-550 */
case 0x3830: /* PL-800 */
wacom->extra_z_bits = 2;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index a2189739e30f..98f17fa3a892 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -129,6 +130,7 @@ struct t9_range {
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
#define MXT_COMMS_CMD 1
+#define MXT_COMMS_RETRIGEN BIT(6)
/* MXT_DEBUG_DIAGNOSTIC_T37 */
#define MXT_DIAGNOSTIC_PAGEUP 0x01
@@ -308,6 +310,7 @@ struct mxt_data {
struct t7_config t7_cfg;
struct mxt_dbg dbg;
struct gpio_desc *reset_gpio;
+ bool use_retrigen_workaround;
/* Cached parameters from object table */
u16 T5_address;
@@ -318,6 +321,7 @@ struct mxt_data {
u16 T71_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
+ u16 T18_address;
u8 T19_reportid;
u16 T44_address;
u8 T100_reportid_min;
@@ -473,7 +477,7 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
bootloader = appmode - 0x24;
break;
}
- /* Fall through - for normal case */
+ fallthrough; /* for normal case */
case 0x4c:
case 0x4d:
case 0x5a:
@@ -1190,9 +1194,11 @@ static int mxt_acquire_irq(struct mxt_data *data)
enable_irq(data->irq);
- error = mxt_process_messages_until_invalid(data);
- if (error)
- return error;
+ if (data->use_retrigen_workaround) {
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
+ }
return 0;
}
@@ -1282,6 +1288,38 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
return crc;
}
+static int mxt_check_retrigen(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ int val;
+ struct irq_data *irqd;
+
+ data->use_retrigen_workaround = false;
+
+ irqd = irq_get_irq_data(data->irq);
+ if (!irqd)
+ return -EINVAL;
+
+ if (irqd_is_level_type(irqd))
+ return 0;
+
+ if (data->T18_address) {
+ error = __mxt_read_reg(client,
+ data->T18_address + MXT_COMMS_CTRL,
+ 1, &val);
+ if (error)
+ return error;
+
+ if (val & MXT_COMMS_RETRIGEN)
+ return 0;
+ }
+
+ dev_warn(&client->dev, "Enabling RETRIGEN workaround\n");
+ data->use_retrigen_workaround = true;
+ return 0;
+}
+
static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
struct device *dev = &data->client->dev;
@@ -1561,6 +1599,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
+ ret = mxt_check_retrigen(data);
+ if (ret)
+ goto release_mem;
+
ret = mxt_soft_reset(data);
if (ret)
goto release_mem;
@@ -1604,6 +1646,7 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T71_address = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
+ data->T18_address = 0;
data->T19_reportid = 0;
data->T44_address = 0;
data->T100_reportid_min = 0;
@@ -1678,6 +1721,9 @@ static int mxt_parse_object_table(struct mxt_data *data,
object->num_report_ids - 1;
data->num_touchids = object->num_report_ids;
break;
+ case MXT_SPT_COMMSCONFIG_T18:
+ data->T18_address = object->start_address;
+ break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
break;
@@ -2141,6 +2187,10 @@ static int mxt_initialize(struct mxt_data *data)
if (error)
return error;
+ error = mxt_check_retrigen(data);
+ if (error)
+ return error;
+
error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
&client->dev, GFP_KERNEL, data,
mxt_config_cb);
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3a4f18d3450d..6ff81d48da86 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -288,7 +288,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
wrbuf, 0, NULL);
- /* fallthrough */
+
case EDT_M09:
case EDT_M12:
case EV_FT:
@@ -330,7 +330,6 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
}
break;
- /* fallthrough */
case EDT_M09:
case EDT_M12:
case EV_FT:
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 5477a5718202..b0bd5bb079be 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -955,7 +955,7 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
break;
ts->state = ELAN_STATE_NORMAL;
- /* fall through */
+ fallthrough;
case ELAN_STATE_NORMAL:
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index d6772a2c2d09..e0bacd34866a 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -348,7 +348,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
case 1: /* 6-byte protocol */
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
- /* fall through */
+ fallthrough;
case 2: /* 4-byte protocol */
input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index e007e2e8f626..a6597f026980 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -8,7 +8,9 @@
*/
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
@@ -16,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/sizes.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
@@ -23,15 +26,59 @@
#define EXC3000_SLOTS_PER_FRAME 5
#define EXC3000_LEN_FRAME 66
#define EXC3000_LEN_POINT 10
-#define EXC3000_MT_EVENT 6
+
+#define EXC3000_LEN_MODEL_NAME 16
+#define EXC3000_LEN_FW_VERSION 16
+
+#define EXC3000_MT1_EVENT 0x06
+#define EXC3000_MT2_EVENT 0x18
+
#define EXC3000_TIMEOUT_MS 100
+#define EXC3000_RESET_MS 10
+#define EXC3000_READY_MS 100
+
+static const struct i2c_device_id exc3000_id[];
+
+struct eeti_dev_info {
+ const char *name;
+ int max_xy;
+};
+
+enum eeti_dev_id {
+ EETI_EXC3000,
+ EETI_EXC80H60,
+ EETI_EXC80H84,
+};
+
+static struct eeti_dev_info exc3000_info[] = {
+ [EETI_EXC3000] = {
+ .name = "EETI EXC3000 Touch Screen",
+ .max_xy = SZ_4K - 1,
+ },
+ [EETI_EXC80H60] = {
+ .name = "EETI EXC80H60 Touch Screen",
+ .max_xy = SZ_16K - 1,
+ },
+ [EETI_EXC80H84] = {
+ .name = "EETI EXC80H84 Touch Screen",
+ .max_xy = SZ_16K - 1,
+ },
+};
+
struct exc3000_data {
struct i2c_client *client;
+ const struct eeti_dev_info *info;
struct input_dev *input;
struct touchscreen_properties prop;
+ struct gpio_desc *reset;
struct timer_list timer;
u8 buf[2 * EXC3000_LEN_FRAME];
+ struct completion wait_event;
+ struct mutex query_lock;
+ int query_result;
+ char model[EXC3000_LEN_MODEL_NAME];
+ char fw_version[EXC3000_LEN_FW_VERSION];
};
static void exc3000_report_slots(struct input_dev *input,
@@ -58,10 +105,15 @@ static void exc3000_timer(struct timer_list *t)
input_sync(data->input);
}
-static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
+static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
{
+ struct i2c_client *client = data->client;
+ u8 expected_event = EXC3000_MT1_EVENT;
int ret;
+ if (data->info->max_xy == SZ_16K - 1)
+ expected_event = EXC3000_MT2_EVENT;
+
ret = i2c_master_send(client, "'", 2);
if (ret < 0)
return ret;
@@ -76,19 +128,21 @@ static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
if (ret != EXC3000_LEN_FRAME)
return -EIO;
- if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME ||
- buf[2] != EXC3000_MT_EVENT)
+ if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME)
+ return -EINVAL;
+
+ if (buf[2] != expected_event)
return -EINVAL;
return 0;
}
-static int exc3000_read_data(struct i2c_client *client,
+static int exc3000_read_data(struct exc3000_data *data,
u8 *buf, int *n_slots)
{
int error;
- error = exc3000_read_frame(client, buf);
+ error = exc3000_read_frame(data, buf);
if (error)
return error;
@@ -98,7 +152,7 @@ static int exc3000_read_data(struct i2c_client *client,
if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
/* Read 2nd frame to get the rest of the contacts. */
- error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME);
+ error = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME);
if (error)
return error;
@@ -110,6 +164,28 @@ static int exc3000_read_data(struct i2c_client *client,
return 0;
}
+static int exc3000_query_interrupt(struct exc3000_data *data)
+{
+ u8 *buf = data->buf;
+ int error;
+
+ error = i2c_master_recv(data->client, buf, EXC3000_LEN_FRAME);
+ if (error < 0)
+ return error;
+
+ if (buf[0] != 'B')
+ return -EPROTO;
+
+ if (buf[4] == 'E')
+ strlcpy(data->model, buf + 5, sizeof(data->model));
+ else if (buf[4] == 'D')
+ strlcpy(data->fw_version, buf + 5, sizeof(data->fw_version));
+ else
+ return -EPROTO;
+
+ return 0;
+}
+
static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
{
struct exc3000_data *data = dev_id;
@@ -118,7 +194,13 @@ static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
int slots, total_slots;
int error;
- error = exc3000_read_data(data->client, buf, &total_slots);
+ if (mutex_is_locked(&data->query_lock)) {
+ data->query_result = exc3000_query_interrupt(data);
+ complete(&data->wait_event);
+ goto out;
+ }
+
+ error = exc3000_read_data(data, buf, &total_slots);
if (error) {
/* Schedule a timer to release "stuck" contacts */
mod_timer(&data->timer,
@@ -145,31 +227,132 @@ out:
return IRQ_HANDLED;
}
-static int exc3000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ static const u8 request[68] = {
+ 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'D', 0x00
+ };
+ int error;
+
+ mutex_lock(&data->query_lock);
+
+ data->query_result = -ETIMEDOUT;
+ reinit_completion(&data->wait_event);
+
+ error = i2c_master_send(client, request, sizeof(request));
+ if (error < 0) {
+ mutex_unlock(&data->query_lock);
+ return error;
+ }
+
+ wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+ mutex_unlock(&data->query_lock);
+
+ if (data->query_result < 0)
+ return data->query_result;
+
+ return sprintf(buf, "%s\n", data->fw_version);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static ssize_t exc3000_get_model(struct exc3000_data *data)
+{
+ static const u8 request[68] = {
+ 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'E', 0x00
+ };
+ struct i2c_client *client = data->client;
+ int error;
+
+ mutex_lock(&data->query_lock);
+ data->query_result = -ETIMEDOUT;
+ reinit_completion(&data->wait_event);
+
+ error = i2c_master_send(client, request, sizeof(request));
+ if (error < 0) {
+ mutex_unlock(&data->query_lock);
+ return error;
+ }
+
+ wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+ mutex_unlock(&data->query_lock);
+
+ return data->query_result;
+}
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ int error;
+
+ error = exc3000_get_model(data);
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%s\n", data->model);
+}
+static DEVICE_ATTR_RO(model);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_model.attr,
+ NULL
+};
+
+static struct attribute_group exc3000_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int exc3000_probe(struct i2c_client *client)
{
struct exc3000_data *data;
struct input_dev *input;
- int error;
+ int error, max_xy, retry;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+ data->info = device_get_match_data(&client->dev);
+ if (!data->info) {
+ enum eeti_dev_id eeti_dev_id =
+ i2c_match_id(exc3000_id, client)->driver_data;
+ data->info = &exc3000_info[eeti_dev_id];
+ }
timer_setup(&data->timer, exc3000_timer, 0);
+ init_completion(&data->wait_event);
+ mutex_init(&data->query_lock);
+
+ data->reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset))
+ return PTR_ERR(data->reset);
+
+ if (data->reset) {
+ msleep(EXC3000_RESET_MS);
+ gpiod_set_value_cansleep(data->reset, 0);
+ msleep(EXC3000_READY_MS);
+ }
input = devm_input_allocate_device(&client->dev);
if (!input)
return -ENOMEM;
data->input = input;
+ input_set_drvdata(input, data);
- input->name = "EETI EXC3000 Touch Screen";
+ input->name = data->info->name;
input->id.bustype = BUS_I2C;
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ max_xy = data->info->max_xy;
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+
touchscreen_parse_properties(input, true, &data->prop);
error = input_mt_init_slots(input, EXC3000_NUM_SLOTS,
@@ -187,18 +370,49 @@ static int exc3000_probe(struct i2c_client *client,
if (error)
return error;
+ /*
+ * I²C does not have built-in recovery, so retry on failure. This
+	 * ensures that the device probe will not fail for temporary issues
+ * on the bus. This is not needed for the sysfs calls (userspace
+ * will receive the error code and can start another query) and
+	 * cannot be done for touch events (but that only means losing one
+	 * or two touch events anyway).
+ */
+ for (retry = 0; retry < 3; retry++) {
+ error = exc3000_get_model(data);
+ if (!error)
+ break;
+ dev_warn(&client->dev, "Retry %d get EETI EXC3000 model: %d\n",
+ retry + 1, error);
+ }
+
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev, "TS Model: %s", data->model);
+
+ i2c_set_clientdata(client, data);
+
+ error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
+ if (error)
+ return error;
+
return 0;
}
static const struct i2c_device_id exc3000_id[] = {
- { "exc3000", 0 },
+ { "exc3000", EETI_EXC3000 },
+ { "exc80h60", EETI_EXC80H60 },
+ { "exc80h84", EETI_EXC80H84 },
{ }
};
MODULE_DEVICE_TABLE(i2c, exc3000_id);
#ifdef CONFIG_OF
static const struct of_device_id exc3000_of_match[] = {
- { .compatible = "eeti,exc3000" },
+ { .compatible = "eeti,exc3000", .data = &exc3000_info[EETI_EXC3000] },
+ { .compatible = "eeti,exc80h60", .data = &exc3000_info[EETI_EXC80H60] },
+ { .compatible = "eeti,exc80h84", .data = &exc3000_info[EETI_EXC80H84] },
{ }
};
MODULE_DEVICE_TABLE(of, exc3000_of_match);
@@ -210,7 +424,7 @@ static struct i2c_driver exc3000_driver = {
.of_match_table = of_match_ptr(exc3000_of_match),
},
.id_table = exc3000_id,
- .probe = exc3000_probe,
+ .probe_new = exc3000_probe,
};
module_i2c_driver(exc3000_driver);
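The attribute group registered above exposes two read-only sysfs files ("model" and "fw_version") on the I2C client device. A small userspace reader, assuming a hypothetical bus/address of 3-002a:

    #include <stdio.h>

    int main(void)
    {
            char line[64];
            /* The path is board-specific; 3-002a is only an example. */
            FILE *f = fopen("/sys/bus/i2c/devices/3-002a/model", "r");

            if (!f)
                    return 1;
            if (fgets(line, sizeof(line), f))
                    printf("touchscreen model: %s", line);
            fclose(f);
            return 0;
    }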
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 5875bb1099a8..3162b68f7374 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -289,7 +289,7 @@ static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr)
break;
case IQS5XX_BL_CMD_EXEC:
usleep_range(10000, 10100);
- /* fall through */
+ fallthrough;
default:
return 0;
}
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 1af08d3dfaf7..f15713aaebc2 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -130,7 +130,6 @@ static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
switch (buf[1] & EVENT_TAG_MASK) {
case EVENT_INIT:
- /* fall through */
case EVENT_MIDDLE:
input_report_abs(data->input_dev, ABS_X, x);
input_report_abs(data->input_dev, ABS_Y, y);
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b54cc64e4ea6..df946869d4cd 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -255,7 +255,7 @@ static void stmfts_parse_events(struct stmfts_data *sdata)
case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY:
case STMFTS_EV_STATUS:
complete(&sdata->cmd_done);
- /* fall through */
+ fallthrough;
case STMFTS_EV_NO_EVENT:
case STMFTS_EV_DEBUG:
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 607d1aeb595d..bb1699e0d3c7 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -290,7 +290,7 @@ static int wm831x_ts_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n",
pdata->isel);
- /* Fall through */
+ fallthrough;
case 200:
case 0:
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b622af72448f..bef5d75e306b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -129,140 +129,8 @@ config MSM_IOMMU
If unsure, say N here.
-config IOMMU_PGTABLES_L2
- def_bool y
- depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
-# AMD IOMMU support
-config AMD_IOMMU
- bool "AMD IOMMU support"
- select SWIOTLB
- select PCI_MSI
- select PCI_ATS
- select PCI_PRI
- select PCI_PASID
- select IOMMU_API
- select IOMMU_IOVA
- select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
- help
- With this option you can enable support for AMD IOMMU hardware in
- your system. An IOMMU is a hardware component which provides
- remapping of DMA memory accesses from devices. With an AMD IOMMU you
- can isolate the DMA memory of different devices and protect the
- system from misbehaving device drivers or hardware.
-
- You can find out if your system has an AMD IOMMU if you look into
- your BIOS for an option to enable it or if you have an IVRS ACPI
- table.
-
-config AMD_IOMMU_V2
- tristate "AMD IOMMU Version 2 driver"
- depends on AMD_IOMMU
- select MMU_NOTIFIER
- help
- This option enables support for the AMD IOMMUv2 features of the IOMMU
- hardware. Select this option if you want to use devices that support
- the PCI PRI and PASID interface.
-
-config AMD_IOMMU_DEBUGFS
- bool "Enable AMD IOMMU internals in DebugFS"
- depends on AMD_IOMMU && IOMMU_DEBUGFS
- help
- !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
-
- DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
- Exposes AMD IOMMU device internals in DebugFS.
-
- This option is -NOT- intended for production environments, and should
- not generally be enabled.
-
-# Intel IOMMU support
-config DMAR_TABLE
- bool
-
-config INTEL_IOMMU
- bool "Support for Intel IOMMU using DMA Remapping Devices"
- depends on PCI_MSI && ACPI && (X86 || IA64)
- select DMA_OPS
- select IOMMU_API
- select IOMMU_IOVA
- select NEED_DMA_MAP_STATE
- select DMAR_TABLE
- select SWIOTLB
- select IOASID
- help
- DMA remapping (DMAR) devices support enables independent address
- translations for Direct Memory Access (DMA) from devices.
- These DMA remapping devices are reported via ACPI tables
- and include PCI device scope covered by these DMA
- remapping devices.
-
-config INTEL_IOMMU_DEBUGFS
- bool "Export Intel IOMMU internals in Debugfs"
- depends on INTEL_IOMMU && IOMMU_DEBUGFS
- help
- !!!WARNING!!!
-
- DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
-
- Expose Intel IOMMU internals in Debugfs.
-
- This option is -NOT- intended for production environments, and should
- only be enabled for debugging Intel IOMMU.
-
-config INTEL_IOMMU_SVM
- bool "Support for Shared Virtual Memory with Intel IOMMU"
- depends on INTEL_IOMMU && X86_64
- select PCI_PASID
- select PCI_PRI
- select MMU_NOTIFIER
- select IOASID
- help
- Shared Virtual Memory (SVM) provides a facility for devices
- to access DMA resources through process address space by
- means of a Process Address Space ID (PASID).
-
-config INTEL_IOMMU_DEFAULT_ON
- def_bool y
- prompt "Enable Intel DMA Remapping Devices by default"
- depends on INTEL_IOMMU
- help
- Selecting this option will enable a DMAR device at boot time if
- one is found. If this option is not selected, DMAR support can
- be enabled by passing intel_iommu=on to the kernel.
-
-config INTEL_IOMMU_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on INTEL_IOMMU && BROKEN && X86
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
-config INTEL_IOMMU_FLOPPY_WA
- def_bool y
- depends on INTEL_IOMMU && X86
- help
- Floppy disk drivers are known to bypass DMA API calls
- thereby failing to work when IOMMU is enabled. This
- workaround will setup a 1:1 mapping for the first
- 16MiB to make floppy (an ISA device) work.
-
-config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
- bool "Enable Intel IOMMU scalable mode by default"
- depends on INTEL_IOMMU
- help
- Selecting this option will enable by default the scalable mode if
- hardware presents the capability. The scalable mode is defined in
- VT-d 3.0. The scalable mode capability could be checked by reading
- /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
- is not selected, scalable mode support could also be enabled by
- passing intel_iommu=sm_on to the kernel. If not sure, please use
- the default value.
+source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/intel/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
@@ -276,7 +144,6 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
help
@@ -294,7 +161,6 @@ config OMAP_IOMMU_DEBUG
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
- depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -311,7 +177,6 @@ config SUN50I_IOMMU
depends on ARCH_SUNXI || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
- select IOMMU_DMA
help
Support for the IOMMU introduced in the Allwinner H6 SoCs.
@@ -338,7 +203,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -361,7 +226,6 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
- depends on ARM || IOMMU_DMA
depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@@ -383,7 +247,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
+ depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -469,11 +333,9 @@ config S390_AP_IOMMU
config MTK_IOMMU
bool "MTK IOMMU Support"
- depends on HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
- select IOMMU_DMA
select IOMMU_IO_PGTABLE_ARMV7S
select MEMORY
select MTK_SMI
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 342190196dfb..11f1771104f3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-y += amd/ intel/ arm/
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
@@ -11,19 +12,8 @@ obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o
-obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
-obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
-obj-$(CONFIG_DMAR_TABLE) += intel/dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/iommu.o intel/pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/trace.o
-obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel/debugfs.o
-obj-$(CONFIG_INTEL_IOMMU_SVM) += intel/svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
-obj-$(CONFIG_IRQ_REMAP) += intel/irq_remapping.o irq_remapping.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
@@ -35,6 +25,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
-obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
new file mode 100644
index 000000000000..1f061d91e0b8
--- /dev/null
+++ b/drivers/iommu/amd/Kconfig
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# AMD IOMMU support
+config AMD_IOMMU
+ bool "AMD IOMMU support"
+ select SWIOTLB
+ select PCI_MSI
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
+ select IOMMU_API
+ select IOMMU_IOVA
+ select IOMMU_DMA
+ depends on X86_64 && PCI && ACPI
+ help
+ With this option you can enable support for AMD IOMMU hardware in
+ your system. An IOMMU is a hardware component which provides
+ remapping of DMA memory accesses from devices. With an AMD IOMMU you
+ can isolate the DMA memory of different devices and protect the
+ system from misbehaving device drivers or hardware.
+
+ You can find out if your system has an AMD IOMMU if you look into
+ your BIOS for an option to enable it or if you have an IVRS ACPI
+ table.
+
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver"
+ depends on AMD_IOMMU
+ select MMU_NOTIFIER
+ help
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+ the PCI PRI and PASID interface.
+
+config AMD_IOMMU_DEBUGFS
+ bool "Enable AMD IOMMU internals in DebugFS"
+ depends on AMD_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+ Exposes AMD IOMMU device internals in DebugFS.
+
+ This option is -NOT- intended for production environments, and should
+ not generally be enabled.
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
new file mode 100644
index 000000000000..dc5a2fa4fd37
--- /dev/null
+++ b/drivers/iommu/amd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 6ebd4825e320..c652f16eb702 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -720,21 +720,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
static void __init free_ppr_log(struct amd_iommu *iommu)
{
- if (iommu->ppr_log == NULL)
- return;
-
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- if (iommu->ga_log)
- free_pages((unsigned long)iommu->ga_log,
- get_order(GA_LOG_SIZE));
- if (iommu->ga_log_tail)
- free_pages((unsigned long)iommu->ga_log_tail,
- get_order(8));
+ free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
+ free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}
@@ -1842,7 +1835,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pci_info(pdev, "Extended features (%#llx):\n",
+ pci_info(pdev, "Extended features (%#llx):",
iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
@@ -2265,7 +2258,7 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
iommu_feature_enable(iommu, CONTROL_GAM_EN);
- /* Fall through */
+ fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 2f22326ee4df..ba9f3dbc5b94 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -162,7 +162,18 @@ static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
}
-static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+ atomic64_set(&domain->pt_root, root);
+}
+
+static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+ amd_iommu_domain_set_pt_root(domain, 0);
+}
+
+static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode)
{
u64 pt_root;
@@ -170,7 +181,7 @@ static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
pt_root = mode & 7;
pt_root |= (u64)root;
- return pt_root;
+ amd_iommu_domain_set_pt_root(domain, pt_root);
}
static struct iommu_dev_data *alloc_dev_data(u16 devid)
@@ -1410,7 +1421,7 @@ static bool increase_address_space(struct protection_domain *domain,
struct domain_pgtable pgtable;
unsigned long flags;
bool ret = true;
- u64 *pte, root;
+ u64 *pte;
spin_lock_irqsave(&domain->lock, flags);
@@ -1438,8 +1449,7 @@ static bool increase_address_space(struct protection_domain *domain,
* Device Table needs to be updated and flushed before the new root can
* be published.
*/
- root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
- atomic64_set(&domain->pt_root, root);
+ amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
ret = true;
@@ -2319,7 +2329,7 @@ static void protection_domain_free(struct protection_domain *domain)
domain_id_free(domain->id);
amd_iommu_domain_get_pgtable(domain, &pgtable);
- atomic64_set(&domain->pt_root, 0);
+ amd_iommu_domain_clr_pt_root(domain);
free_pagetable(&pgtable);
kfree(domain);
@@ -2327,7 +2337,7 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain, int mode)
{
- u64 *pt_root = NULL, root;
+ u64 *pt_root = NULL;
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
@@ -2343,8 +2353,7 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return -ENOMEM;
}
- root = amd_iommu_domain_encode_pgtable(pt_root, mode);
- atomic64_set(&domain->pt_root, root);
+ amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
@@ -2713,8 +2722,8 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
/* First save pgtable configuration*/
amd_iommu_domain_get_pgtable(domain, &pgtable);
- /* Update data structure */
- atomic64_set(&domain->pt_root, 0);
+ /* Remove page-table from domain */
+ amd_iommu_domain_clr_pt_root(domain);
/* Make changes visible to IOMMUs */
update_domain(domain);
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index e4b025c5637c..c259108ab6dd 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -495,7 +495,7 @@ static void do_fault(struct work_struct *work)
if (access_error(vma, fault))
goto out;
- ret = handle_mm_fault(vma, address, flags);
+ ret = handle_mm_fault(vma, address, flags, NULL);
out:
mmap_read_unlock(mm);
diff --git a/drivers/iommu/arm/Makefile b/drivers/iommu/arm/Makefile
new file mode 100644
index 000000000000..0f9efeab709f
--- /dev/null
+++ b/drivers/iommu/arm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += arm-smmu/ arm-smmu-v3/
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
new file mode 100644
index 000000000000..569e24e9f162
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f578677a5c41..c192544e874b 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -903,7 +903,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_CFGI_STE:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
@@ -936,7 +936,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_TLBI_NH_ASID:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
@@ -1036,7 +1036,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
*/
return;
case CMDQ_ERR_CERROR_ILL_IDX:
- /* Fallthrough */
default:
break;
}
@@ -1479,7 +1478,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
}
/*
- * Try to unlock the cmq lock. This will fail if we're the last
+ * Try to unlock the cmdq lock. This will fail if we're the last
* reader, in which case we can safely update cmdq->q.llq.cons
*/
if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
@@ -2850,7 +2849,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return -ENODEV;
- return ops->map(ops, iova, paddr, size, prot);
+ return ops->map(ops, iova, paddr, size, prot, gfp);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -3758,7 +3757,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
- /* Fallthrough */
+ fallthrough;
case IDR0_STALL_MODEL_STALL:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
@@ -3778,7 +3777,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
switch (FIELD_GET(IDR0_TTF, reg)) {
case IDR0_TTF_AARCH32_64:
smmu->ias = 40;
- /* Fallthrough */
+ fallthrough;
case IDR0_TTF_AARCH64:
break;
default:
@@ -3875,7 +3874,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
default:
dev_info(smmu->dev,
"unknown output address size. Truncating to 48-bit\n");
- /* Fallthrough */
+ fallthrough;
case IDR5_OAS_48_BIT:
smmu->oas = 48;
}
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
new file mode 100644
index 000000000000..e240a7bcf310
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index c75b9d957b70..f4ff124a1967 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -147,16 +147,57 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
.reset = arm_mmu500_reset,
};
+static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+	 * Split each readq into two readl accesses.
+ */
+ return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
+}
+
+static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
+ u64 val)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+	 * Split each writeq into two writel accesses.
+ */
+ hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
+}
+
+static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
+{
+
+ /*
+ * Armada-AP806 erratum #582743.
+ * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
+ * formats altogether and allow using 32 bits access on the
+ * interconnect.
+ */
+ smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
+ ARM_SMMU_FEAT_FMT_AARCH64_16K |
+ ARM_SMMU_FEAT_FMT_AARCH64_64K);
+
+ return 0;
+}
+
+static const struct arm_smmu_impl mrvl_mmu500_impl = {
+ .read_reg64 = mrvl_mmu500_readq,
+ .write_reg64 = mrvl_mmu500_writeq,
+ .cfg_probe = mrvl_mmu500_cfg_probe,
+ .reset = arm_mmu500_reset,
+};
+
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
/*
- * We will inevitably have to combine model-specific implementation
- * quirks with platform-specific integration quirks, but everything
- * we currently support happens to work out as straightforward
- * mutually-exclusive assignments.
+ * Set the impl for model-specific implementation quirks first,
+ * such that platform integration quirks can pick it up and
+ * inherit from it if necessary.
*/
switch (smmu->model) {
case ARM_MMU500:
@@ -168,12 +209,21 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
break;
}
+ /* This is implicitly MMU-400 */
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
+ return nvidia_smmu_impl_init(smmu);
+
if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
- of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
+ of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
return qcom_smmu_impl_init(smmu);
+ if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
+ smmu->impl = &mrvl_mmu500_impl;
+
return smmu;
}
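For reference, the hi_lo_readq_relaxed() helper used by the Marvell workaround above boils down to two 32-bit reads (high word first). A rough equivalent is sketched below, shown only to illustrate how the erratum workaround avoids 64-bit accesses on the interconnect:

    static u64 split_readq_relaxed(const volatile void __iomem *addr)
    {
            const volatile u32 __iomem *p = addr;
            u32 low, high;

            high = readl_relaxed(p + 1);    /* upper 32 bits at offset +4 */
            low = readl_relaxed(p);         /* lower 32 bits at offset +0 */

            return low + ((u64)high << 32);
    }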
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
new file mode 100644
index 000000000000..31368057e9be
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "arm-smmu.h"
+
+/*
+ * Tegra194 has three ARM MMU-500 instances.
+ * Two of them are used together and must be programmed identically to
+ * interleave IOVA accesses across them; together they translate accesses
+ * from non-isochronous HW devices.
+ * The third instance translates accesses from isochronous HW devices.
+ * This implementation supports programming the two instances that must
+ * be programmed identically.
+ * The third instance is driven by the standard arm-smmu driver itself
+ * and is out of scope of this implementation.
+ */
+#define NUM_SMMU_INSTANCES 2
+
+struct nvidia_smmu {
+ struct arm_smmu_device smmu;
+ void __iomem *bases[NUM_SMMU_INSTANCES];
+};
+
+static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
+ unsigned int inst, int page)
+{
+ struct nvidia_smmu *nvidia_smmu;
+
+ nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
+ return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
+}
+
+static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readl_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
+ int page, int offset, u32 val)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writel_relaxed(val, reg);
+ }
+}
+
+static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readq_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
+ int page, int offset, u64 val)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writeq_relaxed(val, reg);
+ }
+}
+
+static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int delay;
+
+ arm_smmu_writel(smmu, page, sync, 0);
+
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ unsigned int spin_cnt;
+
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ u32 val = 0;
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg;
+
+ reg = nvidia_smmu_page(smmu, i, page) + status;
+ val |= readl_relaxed(reg);
+ }
+
+ if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+
+ cpu_relax();
+ }
+
+ udelay(delay);
+ }
+
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
+}
+
+static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ u32 val;
+ void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
+ ARM_SMMU_GR0_sGFSR;
+
+ /* clear global FSR */
+ val = readl_relaxed(reg);
+ writel_relaxed(val, reg);
+ }
+
+ return 0;
+}
+
+static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
+ struct arm_smmu_device *smmu,
+ int inst)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
+
+ gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ if (!gfsr)
+ return IRQ_NONE;
+
+ gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err_ratelimited(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+ writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
+{
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu = dev;
+
+ for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ irqreturn_t irq_ret;
+
+ irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
+ struct arm_smmu_device *smmu,
+ int idx, int inst)
+{
+ u32 fsr, fsynr, cbfrsynra;
+ unsigned long iova;
+ void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
+ void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
+{
+ int idx;
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain;
+
+ smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
+ smmu = smmu_domain->smmu;
+
+ for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ irqreturn_t irq_ret;
+
+ /*
+ * Interrupt line is shared between all contexts.
+ * Check for faults across all contexts.
+ */
+ for (idx = 0; idx < smmu->num_context_banks; idx++) {
+ irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
+ idx, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static const struct arm_smmu_impl nvidia_smmu_impl = {
+ .read_reg = nvidia_smmu_read_reg,
+ .write_reg = nvidia_smmu_write_reg,
+ .read_reg64 = nvidia_smmu_read_reg64,
+ .write_reg64 = nvidia_smmu_write_reg64,
+ .reset = nvidia_smmu_reset,
+ .tlb_sync = nvidia_smmu_tlb_sync,
+ .global_fault = nvidia_smmu_global_fault,
+ .context_fault = nvidia_smmu_context_fault,
+};
+
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct resource *res;
+ struct device *dev = smmu->dev;
+ struct nvidia_smmu *nvidia_smmu;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+ if (!nvidia_smmu)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+	 * Copy the data from the struct arm_smmu_device *smmu allocated in
+	 * arm-smmu.c. The smmu embedded in struct nvidia_smmu replaces the
+	 * smmu pointer used by arm-smmu.c once this function returns.
+	 * This makes it possible to derive nvidia_smmu from the smmu pointer
+	 * passed to the arm_smmu_impl callbacks later on.
+ */
+ nvidia_smmu->smmu = *smmu;
+ /* Instance 0 is ioremapped by arm-smmu.c. */
+ nvidia_smmu->bases[0] = smmu->base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nvidia_smmu->bases[1]))
+ return ERR_CAST(nvidia_smmu->bases[1]);
+
+ nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+
+ /*
+ * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
+	 * Once this function returns, arm-smmu.c will use the arm_smmu_device
+	 * allocated as part of struct nvidia_smmu.
+ */
+ devm_kfree(dev, smmu);
+
+ return &nvidia_smmu->smmu;
+}
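
The two comments above describe a wrapping pattern: the generic arm_smmu_device is embedded in the vendor-specific nvidia_smmu, the embedded copy is handed back to the core, and the wrapper is later recovered from that embedded pointer. A minimal standalone C sketch of the same idea, with purely illustrative names and a local userspace container_of(), might look like this:

/*
 * Standalone sketch (not kernel code) of the wrapping pattern used by
 * nvidia_smmu_impl_init(): embed the generic device struct in a larger
 * vendor struct, hand back a pointer to the embedded copy, and recover
 * the wrapper later with container_of(). All names here are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_dev {
	int id;
};

struct vendor_dev {
	struct generic_dev base;	/* must stay recoverable via container_of() */
	int num_instances;
};

static struct generic_dev *wrap(struct generic_dev *g)
{
	static struct vendor_dev v;

	v.base = *g;			/* copy the already-initialised state */
	v.num_instances = 2;
	return &v.base;			/* callers keep using the generic type */
}

int main(void)
{
	struct generic_dev g = { .id = 7 };
	struct generic_dev *base = wrap(&g);
	struct vendor_dev *v = container_of(base, struct vendor_dev, base);

	printf("id=%d instances=%d\n", v->base.id, v->num_instances);
	return 0;
}

In the patch itself, bases[0] reuses the mapping already created by the core while only the extra instance gets a new devm_ioremap_resource(), and the core's original allocation is freed once its contents have been copied into the wrapper.
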
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..be4318044f96 100644
--- a/drivers/iommu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 243bc4cb2705..09c42af9f31e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -52,9 +52,6 @@
*/
#define QCOM_DUMMY_VAL -1
-#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
-#define TLB_SPIN_COUNT 10
-
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -673,6 +670,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ irqreturn_t (*context_fault)(int irq, void *dev);
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@@ -835,7 +833,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+
+ if (smmu->impl && smmu->impl->context_fault)
+ context_fault = smmu->impl->context_fault;
+ else
+ context_fault = arm_smmu_context_fault;
+
+ ret = devm_request_irq(smmu->dev, irq, context_fault,
IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@@ -1227,7 +1231,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
arm_smmu_rpm_get(smmu);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, gfp);
arm_smmu_rpm_put(smmu);
return ret;
@@ -1728,7 +1732,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned int size;
u32 id;
bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
- int i;
+ int i, ret;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1891,6 +1895,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
+ if (smmu->impl && smmu->impl->cfg_probe) {
+ ret = smmu->impl->cfg_probe(smmu);
+ if (ret)
+ return ret;
+ }
+
/* Now we've corralled the various formats, what'll it do? */
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
@@ -1918,9 +1928,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
smmu->ipa_size, smmu->pa_size);
- if (smmu->impl && smmu->impl->cfg_probe)
- return smmu->impl->cfg_probe(smmu);
-
return 0;
}
@@ -1946,6 +1953,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+ { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
@@ -2107,6 +2115,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2123,10 +2132,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- smmu = arm_smmu_impl_init(smmu);
- if (IS_ERR(smmu))
- return PTR_ERR(smmu);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
@@ -2138,6 +2143,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
*/
smmu->numpage = resource_size(res);
+ smmu = arm_smmu_impl_init(smmu);
+ if (IS_ERR(smmu))
+ return PTR_ERR(smmu);
+
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@@ -2193,9 +2202,14 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->num_context_irqs = smmu->num_context_banks;
}
+ if (smmu->impl && smmu->impl->global_fault)
+ global_fault = smmu->impl->global_fault;
+ else
+ global_fault = arm_smmu_global_fault;
+
for (i = 0; i < smmu->num_global_irqs; ++i) {
err = devm_request_irq(smmu->dev, smmu->irqs[i],
- arm_smmu_global_fault,
+ global_fault,
IRQF_SHARED,
"arm-smmu global fault",
smmu);
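
The hunks above let an implementation override the global and context fault handlers, while the core falls back to its own handlers when no override is provided. A small standalone sketch of that selection pattern, using made-up names, could be:

#include <stdio.h>

typedef int irqreturn_t;
#define IRQ_HANDLED	1

typedef irqreturn_t (*fault_handler_t)(int irq, void *dev);

struct impl_ops {
	fault_handler_t global_fault;	/* optional per-implementation override */
};

struct smmu {
	const struct impl_ops *impl;
};

static irqreturn_t generic_global_fault(int irq, void *dev)
{
	(void)dev;
	printf("generic handler, irq %d\n", irq);
	return IRQ_HANDLED;
}

static irqreturn_t vendor_global_fault(int irq, void *dev)
{
	(void)dev;
	printf("vendor handler, irq %d\n", irq);
	return IRQ_HANDLED;
}

/* Use the implementation's handler when it provides one, else the generic one. */
static fault_handler_t pick_global_fault(const struct smmu *smmu)
{
	if (smmu->impl && smmu->impl->global_fault)
		return smmu->impl->global_fault;
	return generic_global_fault;
}

int main(void)
{
	const struct impl_ops vendor_impl = { .global_fault = vendor_global_fault };
	struct smmu plain = { .impl = NULL };
	struct smmu wrapped = { .impl = &vendor_impl };

	pick_global_fault(&plain)(5, &plain);
	pick_global_fault(&wrapped)(5, &wrapped);
	return 0;
}
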
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d172c024be61..d890a4a968e8 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -18,6 +18,7 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
+#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -236,6 +237,8 @@ enum arm_smmu_cbar_type {
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
+#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+#define TLB_SPIN_COUNT 10
/* Shared driver definitions */
enum arm_smmu_arch_version {
@@ -387,6 +390,8 @@ struct arm_smmu_impl {
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
int (*def_domain_type)(struct device *dev);
+ irqreturn_t (*global_fault)(int irq, void *dev);
+ irqreturn_t (*context_fault)(int irq, void *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -450,6 +455,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
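
TLB_LOOP_TIMEOUT and TLB_SPIN_COUNT move into the shared header, presumably so implementation files such as the NVIDIA one can reuse the core's sync polling strategy: spin a few times, then back off with a growing delay, until the timeout expires. A rough userspace sketch of that loop, with a faked status register, might be:

#include <stdio.h>

#define TLB_LOOP_TIMEOUT	1000000	/* outer iterations (~1s in the driver) */
#define TLB_SPIN_COUNT		10	/* tight polls before backing off */

static int fake_status = 3;		/* pretend the sync bit clears after a few reads */

static int read_sync_status(void)
{
	return fake_status ? fake_status-- : 0;
}

/* Poll a "sync complete" status: spin a few times, then back off, until timeout. */
static int wait_for_sync(void)
{
	for (unsigned int delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (int spin = 0; spin < TLB_SPIN_COUNT; spin++) {
			if (!read_sync_status())
				return 0;
		}
		/* in the driver this would be udelay(delay) */
	}
	return -1;
}

int main(void)
{
	printf("sync %s\n", wait_for_sync() ? "timed out" : "completed");
	return 0;
}
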
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index d176df569af8..af6bec3ace00 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -37,14 +37,20 @@
#define SMMU_INTR_SEL_NS 0x2000
+enum qcom_iommu_clk {
+ CLK_IFACE,
+ CLK_BUS,
+ CLK_TBU,
+ CLK_NUM,
+};
+
struct qcom_iommu_ctx;
struct qcom_iommu_dev {
/* IOMMU core code handle */
struct iommu_device iommu;
struct device *dev;
- struct clk *iface_clk;
- struct clk *bus_clk;
+ struct clk_bulk_data clks[CLK_NUM];
void __iomem *local_base;
u32 sec_id;
u8 num_ctxs;
@@ -301,7 +307,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
ARM_SMMU_SCTLR_CFCFG;
- if (IS_ENABLED(CONFIG_BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= ARM_SMMU_SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
@@ -438,7 +444,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
return ret;
}
@@ -613,32 +619,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
-static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
- int ret;
-
- ret = clk_prepare_enable(qcom_iommu->iface_clk);
- if (ret) {
- dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(qcom_iommu->bus_clk);
- if (ret) {
- dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
- clk_disable_unprepare(qcom_iommu->iface_clk);
- return ret;
- }
-
- return 0;
-}
-
-static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
- clk_disable_unprepare(qcom_iommu->bus_clk);
- clk_disable_unprepare(qcom_iommu->iface_clk);
-}
-
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
size_t psize = 0;
@@ -795,6 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
struct qcom_iommu_dev *qcom_iommu;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct clk *clk;
int ret, max_asid = 0;
/* find the max asid (which is 1:1 to ctx bank idx), so we know how
@@ -817,17 +798,26 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
return PTR_ERR(qcom_iommu->local_base);
}
- qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(qcom_iommu->iface_clk)) {
+ clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get iface clock\n");
- return PTR_ERR(qcom_iommu->iface_clk);
+ return PTR_ERR(clk);
}
+ qcom_iommu->clks[CLK_IFACE].clk = clk;
- qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
- if (IS_ERR(qcom_iommu->bus_clk)) {
+ clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(qcom_iommu->bus_clk);
+ return PTR_ERR(clk);
+ }
+ qcom_iommu->clks[CLK_BUS].clk = clk;
+
+ clk = devm_clk_get_optional(dev, "tbu");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get tbu clock\n");
+ return PTR_ERR(clk);
}
+ qcom_iommu->clks[CLK_TBU].clk = clk;
if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
&qcom_iommu->sec_id)) {
@@ -899,14 +889,14 @@ static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
- return qcom_iommu_enable_clocks(qcom_iommu);
+ return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
- qcom_iommu_disable_clocks(qcom_iommu);
+ clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
return 0;
}
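
The conversion above replaces the individually managed iface and bus clocks (plus the new optional tbu clock) with a clk_bulk_data array, so a single call enables or disables the whole set and the hand-rolled error unwinding in qcom_iommu_enable_clocks() can be dropped. The sketch below, in plain userspace C with made-up types, shows the bulk enable/rollback idea, including how an optional clock that was not found (NULL, as devm_clk_get_optional() may return) is treated as a no-op:

#include <stdio.h>
#include <stddef.h>

struct clk_item {
	const char *name;	/* NULL models an optional clock that was not found */
	int enabled;
};

static int enable_one(struct clk_item *c)
{
	if (!c->name)		/* like clk_prepare_enable(NULL): silently succeed */
		return 0;
	c->enabled = 1;
	return 0;
}

/* Enable all clocks; unwind already-enabled ones if any enable fails. */
static int bulk_enable(int num, struct clk_item *clks)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = enable_one(&clks[i]);
		if (ret) {
			while (--i >= 0)
				clks[i].enabled = 0;
			return ret;
		}
	}
	return 0;
}

static void bulk_disable(int num, struct clk_item *clks)
{
	while (--num >= 0)
		clks[num].enabled = 0;
}

int main(void)
{
	struct clk_item clks[] = { { "iface", 0 }, { "bus", 0 }, { NULL, 0 } };
	int ret = bulk_enable(3, clks);

	printf("enable: %d (iface=%d bus=%d)\n", ret, clks[0].enabled, clks[1].enabled);
	bulk_disable(3, clks);
	return 0;
}
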
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4959f5df21bd..5141d49a046b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
- gfp);
+ page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
+ gfp, NULL);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 60c8a56e4a3f..bad3c0ce10cb 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -173,7 +173,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
-#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
@@ -226,7 +226,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
};
/*
- * This structure is attached to dev.archdata.iommu of the master device
+ * This structure is attached to dev->iommu->priv of the master device
* on device add, contains a list of SYSMMU controllers defined by device tree,
* which are bound to given master device. It is usually referenced by 'owner'
* pointer.
@@ -670,7 +670,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct device *master = data->master;
if (master) {
- struct exynos_iommu_owner *owner = master->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@@ -688,7 +688,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct device *master = data->master;
if (master) {
- struct exynos_iommu_owner *owner = master->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@@ -721,7 +721,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
}
};
-static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
+static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
@@ -837,8 +837,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
@@ -875,8 +875,8 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
@@ -933,7 +933,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
if (!pent)
return ERR_PTR(-ENOMEM);
- update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
+ exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
@@ -994,7 +994,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- update_pte(sent, mk_lv1ent_sect(paddr, prot));
+ exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -1018,7 +1018,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- update_pte(pent, mk_lv2ent_spage(paddr, prot));
+ exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@@ -1150,7 +1150,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
/* workaround for h/w bug in System MMU v3.3 */
- update_pte(ent, ZERO_LV2LINK);
+ exynos_iommu_set_pte(ent, ZERO_LV2LINK);
size = SECT_SIZE;
goto done;
}
@@ -1171,7 +1171,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
if (lv2ent_small(ent)) {
- update_pte(ent, 0);
+ exynos_iommu_set_pte(ent, 0);
size = SPAGE_SIZE;
domain->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
@@ -1237,7 +1237,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@@ -1263,7 +1263,7 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
static void exynos_iommu_release_device(struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@@ -1287,8 +1287,8 @@ static void exynos_iommu_release_device(struct device *dev)
static int exynos_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data, *entry;
if (!sysmmu)
@@ -1305,7 +1305,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
- dev->archdata.iommu = owner;
+ dev_iommu_priv_set(dev, owner);
}
list_for_each_entry(entry, &owner->controllers, owner_node)
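
Each conversion in this file swaps the old dev->archdata.iommu field for the dev_iommu_priv_get()/dev_iommu_priv_set() accessors, which keep the driver-private pointer inside the per-device dev->iommu structure. A stripped-down userspace approximation of those accessors (illustrative types only, not the kernel definitions) is:

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-ins for the structures involved; names are illustrative. */
struct dev_iommu {
	void *priv;		/* driver-private per-device data */
};

struct device {
	struct dev_iommu *iommu;
};

static void *dev_iommu_priv_get(struct device *dev)
{
	return dev->iommu ? dev->iommu->priv : NULL;
}

static void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

struct owner_data {
	int num_controllers;
};

int main(void)
{
	struct dev_iommu di = { .priv = NULL };
	struct device dev = { .iommu = &di };
	struct owner_data owner = { .num_controllers = 2 };

	dev_iommu_priv_set(&dev, &owner);

	struct owner_data *o = dev_iommu_priv_get(&dev);
	printf("controllers=%d\n", o->num_controllers);
	return 0;
}
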
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index cde281b97afa..099a11a35fb9 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -1174,10 +1174,7 @@ error:
if (irq != NO_IRQ)
free_irq(irq, data);
- if (data) {
- memset(data, 0, sizeof(struct pamu_isr_data));
- kfree(data);
- }
+ kzfree(data);
if (pamu_regs)
iounmap(pamu_regs);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 928d37771ece..b2110767caf4 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -323,7 +323,7 @@ static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
pamu_disable_liodn(info->liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
- info->dev->archdata.iommu_domain = NULL;
+ dev_iommu_priv_set(info->dev, NULL);
kmem_cache_free(iommu_devinfo_cache, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -352,7 +352,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it.
*/
- old_domain_info = dev->archdata.iommu_domain;
+ old_domain_info = dev_iommu_priv_get(dev);
if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain);
@@ -371,8 +371,8 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* the info for the first LIODN as all
* LIODNs share the same domain
*/
- if (!dev->archdata.iommu_domain)
- dev->archdata.iommu_domain = info;
+ if (!dev_iommu_priv_get(dev))
+ dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
new file mode 100644
index 000000000000..5337ee1584b0
--- /dev/null
+++ b/drivers/iommu/intel/Kconfig
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Intel IOMMU support
+config DMAR_TABLE
+ bool
+
+config INTEL_IOMMU
+ bool "Support for Intel IOMMU using DMA Remapping Devices"
+ depends on PCI_MSI && ACPI && (X86 || IA64)
+ select DMA_OPS
+ select IOMMU_API
+ select IOMMU_IOVA
+ select NEED_DMA_MAP_STATE
+ select DMAR_TABLE
+ select SWIOTLB
+ select IOASID
+ help
+	  Support for DMA remapping (DMAR) devices enables independent address
+ translations for Direct Memory Access (DMA) from devices.
+ These DMA remapping devices are reported via ACPI tables
+ and include PCI device scope covered by these DMA
+ remapping devices.
+
+config INTEL_IOMMU_DEBUGFS
+ bool "Export Intel IOMMU internals in Debugfs"
+ depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+ Expose Intel IOMMU internals in Debugfs.
+
+ This option is -NOT- intended for production environments, and should
+ only be enabled for debugging Intel IOMMU.
+
+config INTEL_IOMMU_SVM
+ bool "Support for Shared Virtual Memory with Intel IOMMU"
+ depends on INTEL_IOMMU && X86_64
+ select PCI_PASID
+ select PCI_PRI
+ select MMU_NOTIFIER
+ select IOASID
+ help
+ Shared Virtual Memory (SVM) provides a facility for devices
+ to access DMA resources through process address space by
+ means of a Process Address Space ID (PASID).
+
+config INTEL_IOMMU_DEFAULT_ON
+ def_bool y
+ prompt "Enable Intel DMA Remapping Devices by default"
+ depends on INTEL_IOMMU
+ help
+ Selecting this option will enable a DMAR device at boot time if
+ one is found. If this option is not selected, DMAR support can
+ be enabled by passing intel_iommu=on to the kernel.
+
+config INTEL_IOMMU_BROKEN_GFX_WA
+ bool "Workaround broken graphics drivers (going away soon)"
+ depends on INTEL_IOMMU && BROKEN && X86
+ help
+	  Current graphics drivers tend to use physical addresses
+	  for DMA and avoid using the DMA API. Setting this config
+ option permits the IOMMU driver to set a unity map for
+ all the OS-visible memory. Hence the driver can continue
+ to use physical addresses for DMA, at least until this
+ option is removed in the 2.6.32 kernel.
+
+config INTEL_IOMMU_FLOPPY_WA
+ def_bool y
+ depends on INTEL_IOMMU && X86
+ help
+	  Floppy disk drivers are known to bypass DMA API calls,
+	  thereby failing to work when the IOMMU is enabled. This
+	  workaround will set up a 1:1 mapping for the first
+	  16MiB to make the floppy (an ISA device) work.
+
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+ bool "Enable Intel IOMMU scalable mode by default"
+ depends on INTEL_IOMMU
+ help
+	  Selecting this option will enable scalable mode by default if the
+	  hardware presents the capability. Scalable mode is defined in
+	  VT-d 3.0. The scalable mode capability can be checked by reading
+	  /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+	  is not selected, scalable mode support can also be enabled by
+	  passing intel_iommu=sm_on to the kernel. If unsure, use
+	  the default value.
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
new file mode 100644
index 000000000000..fb8e1e8c8029
--- /dev/null
+++ b/drivers/iommu/intel/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMAR_TABLE) += dmar.o
+obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
+obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index cf1ebb98e418..efea7f02abd9 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -15,7 +15,7 @@
#include <asm/irq_remapping.h>
-#include "intel-pasid.h"
+#include "pasid.h"
struct tbl_walk {
u16 bus;
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 683b812c5c47..93e6345f3414 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
drhd->iommu = iommu;
+ iommu->drhd = drhd;
return 0;
@@ -1438,8 +1439,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order, u64 granu)
+ u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1447,7 +1447,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);
- desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
/*
* If S bit is 0, we only flush a single page. If S bit is set,
@@ -1458,9 +1457,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- desc.qw1 |= addr & ~mask;
- if (size_order)
+ if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
+ addr, size_order);
+
+ /* Take page address */
+ desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
+
+ if (size_order) {
+ /*
+		 * Any 0 bits in the address below size_order would be
+		 * interpreted as a smaller invalidation size, so set them
+		 * to 1s to avoid flushing a smaller range than desired.
+ */
+ desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
+ VTD_PAGE_SHIFT);
+		/* Clear the size_order bit; its position encodes the size */
+ desc.qw1 &= ~mask;
+ /* Set the S bit to indicate flushing more than 1 page */
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+ }
qi_submit_sync(iommu, &desc, 1, 0);
}
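
The new qw1 encoding above sets the low address bits to 1, clears the single bit whose position encodes the invalidation size, and sets the S bit for multi-page flushes. The following standalone C program reproduces that bit manipulation (VTD_PAGE_SHIFT and the bit-11 S flag follow the driver; GENMASK_ULL here is a local re-implementation) so the resulting qw1 value can be inspected for a sample address and size order:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Mirror of the qw1 address/size encoding for a PASID-based devTLB flush. */
static uint64_t encode_addr(uint64_t addr, unsigned int size_order)
{
	uint64_t mask = 1ULL << (VTD_PAGE_SHIFT + size_order - 1);
	uint64_t qw1 = addr & ~((1ULL << VTD_PAGE_SHIFT) - 1);	/* page address */

	if (size_order) {
		/* force the low address bits to 1 so the size can't shrink */
		qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1, VTD_PAGE_SHIFT);
		/* the single 0 bit at this position encodes the size */
		qw1 &= ~mask;
		/* S bit (bit 11) marks a multi-page invalidation */
		qw1 |= 1ULL << 11;
	}
	return qw1;
}

int main(void)
{
	/* flush 2^4 pages (64KiB) starting at the 64KiB-aligned address 0x1230000 */
	printf("qw1 = 0x%llx\n", (unsigned long long)encode_addr(0x1230000ULL, 4));
	return 0;
}
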
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 237a470e1e9c..f8177c59d229 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -48,7 +48,7 @@
#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
-#include "intel-pasid.h"
+#include "pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -356,6 +356,7 @@ static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int intel_no_bounce;
+static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
@@ -372,7 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
if (!dev)
return NULL;
- info = dev->archdata.iommu;
+ info = dev_iommu_priv_get(dev);
if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
@@ -743,12 +744,12 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
static int iommu_dummy(struct device *dev)
{
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+ return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
}
static bool attach_deferred(struct device *dev)
{
- return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+ return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
}
/**
@@ -778,16 +779,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
-static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
+ struct pci_dev *pdev = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
- if (iommu_dummy(dev))
+ if (!dev || iommu_dummy(dev))
return NULL;
if (dev_is_pci(dev)) {
@@ -818,8 +819,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && pdev->is_virtfn)
goto got_pdev;
- *bus = drhd->devices[i].bus;
- *devfn = drhd->devices[i].devfn;
+ if (bus && devfn) {
+ *bus = drhd->devices[i].bus;
+ *devfn = drhd->devices[i].devfn;
+ }
goto out;
}
@@ -829,8 +832,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && drhd->include_all) {
got_pdev:
- *bus = pdev->bus->number;
- *devfn = pdev->devfn;
+ if (bus && devfn) {
+ *bus = pdev->bus->number;
+ *devfn = pdev->devfn;
+ }
goto out;
}
}
@@ -1629,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flag;
+ if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
+ (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
+ return;
+
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -2420,7 +2429,7 @@ static inline void unlink_domain_info(struct device_domain_info *info)
list_del(&info->link);
list_del(&info->global);
if (info->dev)
- info->dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(info->dev, NULL);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -2453,7 +2462,7 @@ static void do_deferred_attach(struct device *dev)
{
struct iommu_domain *domain;
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
domain = iommu_get_domain_for_dev(dev);
if (domain)
intel_iommu_attach_device(domain, dev);
@@ -2599,7 +2608,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
- dev->archdata.iommu = info;
+ dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
@@ -4004,7 +4013,7 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@@ -4039,11 +4048,12 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
+ drhd->gfx_dedicated = 1;
if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
- dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
}
@@ -5060,7 +5070,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
switch (type) {
case IOMMU_DOMAIN_DMA:
- /* fallthrough */
case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(0);
if (!dmar_domain) {
@@ -5146,11 +5155,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
struct device *dev)
{
int ret;
- u8 bus, devfn;
unsigned long flags;
struct intel_iommu *iommu;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@@ -5236,9 +5244,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@@ -5416,7 +5423,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
sid = PCI_DEVID(bus, devfn);
/* Size is only valid in address selective invalidation */
- if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+ if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
size = to_vtd_size(inv_info->addr_info.granule_size,
inv_info->addr_info.nb_granules);
@@ -5425,6 +5432,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
IOMMU_CACHE_INV_TYPE_NR) {
int granu = 0;
u64 pasid = 0;
+ u64 addr = 0;
granu = to_vtd_granularity(cache_type, inv_info->granularity);
if (granu == -EINVAL) {
@@ -5446,13 +5454,12 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
+			/* HW ignores the low-order address bits covered by the address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
(inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
- pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+ pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
inv_info->addr_info.addr, size);
- ret = -ERANGE;
- goto out_unlock;
}
/*
@@ -5464,25 +5471,35 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+ if (!info->ats_enabled)
+ break;
/*
* Always flush device IOTLB if ATS is enabled. vIOMMU
* in the guest may assume IOTLB flush is inclusive,
* which is more efficient.
*/
- if (info->ats_enabled)
- qi_flush_dev_iotlb_pasid(iommu, sid,
- info->pfsid, pasid,
- info->ats_qdep,
- inv_info->addr_info.addr,
- size, granu);
- break;
+ fallthrough;
case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+ /*
+			 * PASID-based device TLB invalidation does not support
+			 * IOMMU_INV_GRANU_PASID granularity; it only supports
+			 * IOMMU_INV_GRANU_ADDR.
+			 * The equivalent is to set the size to cover the entire
+			 * 64-bit address range. The user provides only PASID info
+			 * without address info, so we set addr to 0.
+ */
+ if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
+ size = 64 - VTD_PAGE_SHIFT;
+ addr = 0;
+ } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
+ addr = inv_info->addr_info.addr;
+ }
+
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(iommu, sid,
info->pfsid, pasid,
- info->ats_qdep,
- inv_info->addr_info.addr,
- size, granu);
+ info->ats_qdep, addr,
+ size);
else
pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
break;
@@ -5658,14 +5675,13 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
- dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
return &iommu->iommu;
}
@@ -5673,9 +5689,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return;
@@ -5825,37 +5840,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
-{
- struct intel_iommu *iommu;
- u8 bus, devfn;
-
- if (iommu_dummy(dev)) {
- dev_warn(dev,
- "No IOMMU translation for device; cannot enable SVM\n");
- return NULL;
- }
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if ((!iommu)) {
- dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
- return NULL;
- }
-
- return iommu;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
static int intel_iommu_enable_auxd(struct device *dev)
{
struct device_domain_info *info;
struct intel_iommu *iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu || dmar_disabled)
return -EINVAL;
@@ -6080,6 +6072,7 @@ const struct iommu_ops intel_iommu_ops = {
.sva_bind = intel_svm_bind,
.sva_unbind = intel_svm_unbind,
.sva_get_pasid = intel_svm_get_pasid,
+ .page_response = intel_svm_page_response,
#endif
};
@@ -6182,6 +6175,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+{
+ unsigned short ver;
+
+ if (!IS_GFX_DEVICE(dev))
+ return;
+
+ ver = (dev->device >> 8) & 0xff;
+ if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+ ver != 0x9a)
+ return;
+
+ if (risky_device(dev))
+ return;
+
+ pci_info(dev, "Skip IOMMU disabling for graphics\n");
+ iommu_skip_te_disable = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
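
quirk_igfx_skip_te_disable() keys off the upper byte of the 16-bit PCI device ID to recognise the affected integrated-graphics families. A tiny standalone sketch of that matching style (the two device IDs in main() are just example values) is:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

/* Match the high byte of a 16-bit PCI device ID against a list of families. */
static bool is_affected_gfx(uint16_t device)
{
	static const uint8_t families[] = { 0x45, 0x46, 0x4c, 0x4e, 0x8a, 0x98, 0x9a };
	uint8_t ver = (device >> 8) & 0xff;

	for (size_t i = 0; i < sizeof(families); i++) {
		if (families[i] == ver)
			return true;
	}
	return false;
}

int main(void)
{
	printf("0x9a49 affected: %d\n", is_affected_gfx(0x9a49));
	printf("0x3e92 affected: %d\n", is_affected_gfx(0x3e92));
	return 0;
}
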
+
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aa096b333a99..23583b0e66a5 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -15,6 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
+#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c81f0f17c6ba..e6faedf42fd4 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -19,7 +19,7 @@
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
-#include "intel-pasid.h"
+#include "pasid.h"
/*
* Intel IOMMU system wide PASID name space:
@@ -486,7 +486,16 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qdep = info->ats_qdep;
pfsid = info->pfsid;
- qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ /*
+	 * PASID 0 indicates RID2PASID (a DMA request w/o PASID), so the
+	 * devTLB flush w/o PASID should be used. For a non-zero PASID under
+	 * SVA usage, a device could do DMA with multiple PASIDs, and it is
+	 * more efficient to flush the devTLB entries specific to that PASID.
+ */
+ if (pasid == PASID_RID2PASID)
+ qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ else
+ qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
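
The hunk above picks the flush flavour from the PASID value: PASID 0 (RID2PASID) takes the plain devTLB flush, anything else takes the PASID-qualified one. A trivial sketch of that decision, using the same constant value of 0 for PASID_RID2PASID:

#include <stdio.h>

#define PASID_RID2PASID	0	/* PASID 0 is reserved for requests without PASID */

/* Pick the flush flavour: plain devTLB flush for RID2PASID, per-PASID otherwise. */
static const char *pick_devtlb_flush(unsigned int pasid)
{
	return (pasid == PASID_RID2PASID) ? "qi_flush_dev_iotlb"
					  : "qi_flush_dev_iotlb_pasid";
}

int main(void)
{
	printf("pasid 0  -> %s\n", pick_devtlb_flush(0));
	printf("pasid 10 -> %s\n", pick_devtlb_flush(10));
	return 0;
}
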
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
diff --git a/drivers/iommu/intel/intel-pasid.h b/drivers/iommu/intel/pasid.h
index c5318d40e0fa..c9850766c3a9 100644
--- a/drivers/iommu/intel/intel-pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel-pasid.h - PASID idr, table and entry header
+ * pasid.h - PASID idr, table and entry header
*
* Copyright (C) 2018 Intel Corporation
*
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 6c87c807a0ab..95c3164a2302 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -20,7 +20,7 @@
#include <linux/ioasid.h>
#include <asm/page.h>
-#include "intel-pasid.h"
+#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, int pasid);
@@ -228,13 +228,57 @@ static LIST_HEAD(global_svm_list);
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
+static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
+ struct intel_svm **rsvm,
+ struct intel_svm_dev **rsdev)
+{
+ struct intel_svm_dev *d, *sdev = NULL;
+ struct intel_svm *svm;
+
+ /* The caller should hold the pasid_mutex lock */
+ if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
+ return -EINVAL;
+
+ if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
+ return -EINVAL;
+
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (IS_ERR(svm))
+ return PTR_ERR(svm);
+
+ if (!svm)
+ goto out;
+
+ /*
+	 * If we found an svm for the PASID, there must be at least one
+	 * device bound to it.
+ */
+ if (WARN_ON(list_empty(&svm->devs)))
+ return -EINVAL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(d, &svm->devs, list) {
+ if (d->dev == dev) {
+ sdev = d;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+out:
+ *rsvm = svm;
+ *rsdev = sdev;
+
+ return 0;
+}
+
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
- struct intel_svm_dev *sdev;
- struct intel_svm *svm;
+ struct intel_svm *svm = NULL;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -261,39 +305,23 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
- svm = ioasid_find(NULL, data->hpasid, NULL);
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- if (svm) {
+ if (sdev) {
/*
- * If we found svm for the PASID, there must be at
- * least one device bond, otherwise svm should be freed.
+		 * Do not allow multiple bindings of the same device-PASID since
+		 * there is only one set of SL page tables per PASID. We may
+		 * revisit this once sharing a PGD across domains is supported.
*/
- if (WARN_ON(list_empty(&svm->devs))) {
- ret = -EINVAL;
- goto out;
- }
+ dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
+ svm->pasid);
+ ret = -EBUSY;
+ goto out;
+ }
- for_each_svm_dev(sdev, svm, dev) {
- /*
- * For devices with aux domains, we should allow
- * multiple bind calls with the same PASID and pdev.
- */
- if (iommu_dev_feature_enabled(dev,
- IOMMU_DEV_FEAT_AUX)) {
- sdev->users++;
- } else {
- dev_warn_ratelimited(dev,
- "Already bound with PASID %u\n",
- svm->pasid);
- ret = -EBUSY;
- }
- goto out;
- }
- } else {
+ if (!svm) {
/* We come here when PASID has never been bond to a device. */
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm) {
@@ -373,28 +401,20 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
int intel_svm_unbind_gpasid(struct device *dev, int pasid)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
struct intel_svm *svm;
- int ret = -EINVAL;
+ int ret;
if (WARN_ON(!iommu))
return -EINVAL;
mutex_lock(&pasid_mutex);
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm) {
- ret = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- for_each_svm_dev(sdev, svm, dev) {
- ret = 0;
+ if (sdev) {
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users--;
if (!sdev->users) {
@@ -418,7 +438,6 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid)
kfree(svm);
}
}
- break;
}
out:
mutex_unlock(&pasid_mutex);
@@ -430,7 +449,7 @@ static int
intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
@@ -596,7 +615,7 @@ success:
if (sd)
*sd = sdev;
ret = 0;
- out:
+out:
return ret;
}
@@ -608,21 +627,15 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
struct intel_svm *svm;
int ret = -EINVAL;
- iommu = intel_svm_device_to_iommu(dev);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
goto out;
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm)
- goto out;
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- for_each_svm_dev(sdev, svm, dev) {
- ret = 0;
+ if (sdev) {
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
@@ -651,10 +664,8 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree(svm);
}
}
- break;
}
- out:
-
+out:
return ret;
}
@@ -800,8 +811,63 @@ qi_retry:
}
}
+static int prq_to_iommu_prot(struct page_req_dsc *req)
+{
+ int prot = 0;
+
+ if (req->rd_req)
+ prot |= IOMMU_FAULT_PERM_READ;
+ if (req->wr_req)
+ prot |= IOMMU_FAULT_PERM_WRITE;
+ if (req->exe_req)
+ prot |= IOMMU_FAULT_PERM_EXEC;
+ if (req->pm_req)
+ prot |= IOMMU_FAULT_PERM_PRIV;
+
+ return prot;
+}
+
+static int
+intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+{
+ struct iommu_fault_event event;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ /* Fill in event data for device specific processing */
+ memset(&event, 0, sizeof(struct iommu_fault_event));
+ event.fault.type = IOMMU_FAULT_PAGE_REQ;
+ event.fault.prm.addr = desc->addr;
+ event.fault.prm.pasid = desc->pasid;
+ event.fault.prm.grpid = desc->prg_index;
+ event.fault.prm.perm = prq_to_iommu_prot(desc);
+
+ if (desc->lpig)
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+ if (desc->pasid_present) {
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+ }
+ if (desc->priv_data_present) {
+ /*
+		 * Set the last-page-in-group bit if private data is present:
+		 * a page response is then required, just as it is for LPIG.
+		 * iommu_report_device_fault() doesn't understand this vendor-
+		 * specific requirement, so we set last_page as a workaround.
+ */
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+ memcpy(event.fault.prm.private_data, desc->priv_data,
+ sizeof(desc->priv_data));
+ }
+
+ return iommu_report_device_fault(dev, &event);
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
+ struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
@@ -813,7 +879,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
- struct intel_svm_dev *sdev;
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
@@ -849,6 +914,20 @@ static irqreturn_t prq_event_thread(int irq, void *d)
}
}
+ if (!sdev || sdev->sid != req->rid) {
+ struct intel_svm_dev *t;
+
+ sdev = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &svm->devs, list) {
+ if (t->sid == req->rid) {
+ sdev = t;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
+
result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
@@ -859,6 +938,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!is_canonical_address(address))
goto bad_req;
+ /*
+ * If prq is to be handled outside iommu driver via receiver of
+ * the fault notifiers, we skip the page response here.
+ */
+ if (svm->flags & SVM_FLAG_GUEST_MODE) {
+ if (sdev && !intel_svm_prq_report(sdev->dev, req))
+ goto prq_advance;
+ else
+ goto bad_req;
+ }
+
/* If the mm is already defunct, don't handle faults. */
if (!mmget_not_zero(svm->mm))
goto bad_req;
@@ -872,29 +962,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
goto invalid;
ret = handle_mm_fault(vma, address,
- req->wr_req ? FAULT_FLAG_WRITE : 0);
+ req->wr_req ? FAULT_FLAG_WRITE : 0,
+ NULL);
if (ret & VM_FAULT_ERROR)
goto invalid;
result = QI_RESP_SUCCESS;
- invalid:
+invalid:
mmap_read_unlock(svm->mm);
mmput(svm->mm);
- bad_req:
- /* Accounting for major/minor faults? */
- rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list) {
- if (sdev->sid == req->rid)
- break;
- }
- /* Other devices can go away, but the drivers are not permitted
- * to unbind while any page faults might be in flight. So it's
- * OK to drop the 'lock' here now we have it. */
- rcu_read_unlock();
-
- if (WARN_ON(&sdev->list == &svm->devs))
- sdev = NULL;
-
+bad_req:
+ WARN_ON(!sdev);
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
(req->exe_req << 1) | (req->pm_req);
@@ -905,7 +983,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
and these can be NULL. Do not use them below this point! */
sdev = NULL;
svm = NULL;
- no_pasid:
+no_pasid:
if (req->lpig || req->priv_data_present) {
/*
* Per VT-d spec. v3.0 ch7.7, system software must
@@ -930,6 +1008,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
resp.qw3 = 0;
qi_submit_sync(iommu, &resp, 1, 0);
}
+prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -1000,3 +1079,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva)
return pasid;
}
+
+int intel_svm_page_response(struct device *dev,
+ struct iommu_fault_event *evt,
+ struct iommu_page_response *msg)
+{
+ struct iommu_fault_page_request *prm;
+ struct intel_svm_dev *sdev = NULL;
+ struct intel_svm *svm = NULL;
+ struct intel_iommu *iommu;
+ bool private_present;
+ bool pasid_present;
+ bool last_page;
+ u8 bus, devfn;
+ int ret = 0;
+ u16 sid;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (!msg || !evt)
+ return -EINVAL;
+
+ mutex_lock(&pasid_mutex);
+
+ prm = &evt->fault.prm;
+ sid = PCI_DEVID(bus, devfn);
+ pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+ last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+
+ if (!pasid_present) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
+ if (ret || !sdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+	 * For responses from userspace, we need to make sure that the
+	 * PASID has been bound to its mm.
+ */
+ if (svm->flags & SVM_FLAG_GUEST_MODE) {
+ struct mm_struct *mm;
+
+ mm = get_task_mm(current);
+ if (!mm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (mm != svm->mm) {
+ ret = -ENODEV;
+ mmput(mm);
+ goto out;
+ }
+
+ mmput(mm);
+ }
+
+ /*
+ * Per VT-d spec. v3.0 ch7.7, system software must respond
+	 * with a page group response if private data is present (PDP)
+ * or last page in group (LPIG) bit is set. This is an
+ * additional VT-d requirement beyond PCI ATS spec.
+ */
+ if (last_page || private_present) {
+ struct qi_desc desc;
+
+ desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
+ QI_PGRP_PASID_P(pasid_present) |
+ QI_PGRP_PDP(private_present) |
+ QI_PGRP_RESP_CODE(msg->code) |
+ QI_PGRP_RESP_TYPE;
+ desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw2 = 0;
+ desc.qw3 = 0;
+ if (private_present)
+ memcpy(&desc.qw2, prm->private_data,
+ sizeof(prm->private_data));
+
+ qi_submit_sync(iommu, &desc, 1, 0);
+ }
+out:
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
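
prq_to_iommu_prot() above is a straightforward translation of the page-request flag bits into a generic permission mask. The standalone sketch below mirrors that translation with locally defined PERM_* values (the real code uses the IOMMU_FAULT_PERM_* constants from the uapi header):

#include <stdio.h>

#define PERM_READ	(1 << 0)
#define PERM_WRITE	(1 << 1)
#define PERM_EXEC	(1 << 2)
#define PERM_PRIV	(1 << 3)

struct page_req {
	unsigned int rd_req : 1;
	unsigned int wr_req : 1;
	unsigned int exe_req : 1;
	unsigned int pm_req : 1;
};

/* Translate per-request flag bits into a generic permission mask. */
static int req_to_prot(const struct page_req *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= PERM_READ;
	if (req->wr_req)
		prot |= PERM_WRITE;
	if (req->exe_req)
		prot |= PERM_EXEC;
	if (req->pm_req)
		prot |= PERM_PRIV;
	return prot;
}

int main(void)
{
	struct page_req req = { .rd_req = 1, .wr_req = 1 };

	printf("prot = 0x%x\n", req_to_prot(&req));
	return 0;
}
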
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4272fe4e17f4..a688f22cbe3b 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
- int lvl, arm_v7s_iopte *ptep)
+ int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_v7s_iopte pte, *cptep;
@@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+ cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
if (!cptep)
return -ENOMEM;
@@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
@@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
- ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+ ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
- IOMMU_CACHE))
+ IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC))
+ IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap of partial unmap */
- if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
+ if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova_start + size + 42)
@@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 04fbd4bf0ff9..dc7bcf858b6d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
- int lvl, arm_lpae_iopte *ptep)
+ int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
arm_lpae_iopte *cptep, pte;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
@@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
+ cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
if (!cptep)
return -ENOMEM;
@@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_SYS_CACHE_ONLY)
- pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
- << ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_CACHE)
@@ -461,7 +458,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int iommu_prot)
+ phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
@@ -483,7 +480,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -1178,12 +1175,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
- IOMMU_CACHE))
+ IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops, i);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC))
+ IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -1198,7 +1195,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap of partial unmap */
- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
@@ -1216,7 +1213,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
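
Both io-pgtable flavours now take a gfp_t from the caller and thread it down the recursive map path instead of hard-coding GFP_ATOMIC, so callers that are allowed to sleep can allocate intermediate tables with GFP_KERNEL. A compact userspace analogue of that plumbing (gfp_t and the flag values are stand-ins, and malloc() stands in for the table allocator):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;		/* stand-in for the kernel's gfp_t */
#define GFP_KERNEL	0x1u		/* caller may sleep */
#define GFP_ATOMIC	0x2u		/* caller must not sleep */

/* Recursive table walk that forwards the caller's allocation flags. */
static int map_level(unsigned long iova, int lvl, gfp_t gfp)
{
	void *table;
	int ret;

	if (lvl == 0)
		return 0;

	/* allocate the next-level table under the caller's constraints */
	table = malloc(64);
	if (!table)
		return -1;
	printf("lvl %d: allocated table for iova 0x%lx with gfp=0x%x\n",
	       lvl, iova, gfp);

	ret = map_level(iova, lvl - 1, gfp);	/* rinse, repeat */
	free(table);
	return ret;
}

int main(void)
{
	/* a sleeping caller can now pass GFP_KERNEL instead of forcing GFP_ATOMIC */
	return map_level(0x1000, 3, GFP_KERNEL);
}
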
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b6858adc4f17..609bd25bf154 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -383,8 +383,8 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
* Elements are sorted by start address and overlapping segments
* of the same type are merged.
*/
-int iommu_insert_resv_region(struct iommu_resv_region *new,
- struct list_head *regions)
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
{
struct iommu_resv_region *iter, *tmp, *nr, *top;
LIST_HEAD(stack);
@@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault);
int iommu_page_response(struct device *dev,
struct iommu_page_response *msg)
{
- bool pasid_valid;
+ bool needs_pasid;
int ret = -EINVAL;
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
+ bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
@@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev,
*/
list_for_each_entry(evt, &param->fault_param->faults, list) {
prm = &evt->fault.prm;
- pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ if (prm->grpid != msg->grpid)
+ continue;
- if ((pasid_valid && prm->pasid != msg->pasid) ||
- prm->grpid != msg->grpid)
+ /*
+ * If the PASID is required, the corresponding request is
+ * matched using the group ID, the PASID valid bit and the PASID
+	 * value. Otherwise the response is matched to the request using
+	 * the group ID alone.
+ */
+ needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+ if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
continue;
- /* Sanitize the reply */
- msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+ if (!needs_pasid && has_pasid) {
+ /* No big deal, just clear it. */
+ msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
+ msg->pasid = 0;
+ }
ret = domain->ops->page_response(dev, evt, msg);
list_del(&evt->list);
@@ -2168,8 +2179,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int __iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -2319,9 +2330,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot,
- gfp_t gfp)
+static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
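For reference, the matching rule introduced in the iommu_page_response() hunk earlier in this file's diff can be read as a small predicate. This sketch only restates the logic of that hunk with the flag and field names used there; the helper name is hypothetical.

#include <linux/iommu.h>

/* Does response @msg answer the queued fault described by @prm? */
static bool example_response_matches(const struct iommu_fault_page_request *prm,
				     const struct iommu_page_response *msg)
{
	bool needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (prm->grpid != msg->grpid)
		return false;

	/* The PASID is only compared when the device requires it in the
	 * response; otherwise the group ID alone decides the match. */
	return !needs_pasid || (has_pasid && msg->pasid == prm->pasid);
}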
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 49fc01f2a28d..45a251da5453 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
for (i = 0 ; i < mag->size; ++i) {
struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
- BUG_ON(!iova);
+ if (WARN_ON(!iova))
+ continue;
+
private_free_iova(iovad, iova);
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 6de86e73dfc3..0f18abda0e20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -3,7 +3,7 @@
* IOMMU API for Renesas VMSA-compatible IPMMU
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2020 Renesas Electronics Corporation
*/
#include <linux/bitmap.h>
@@ -686,7 +686,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return domain->iop->map(domain->iop, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@@ -739,7 +739,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
+ { .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", },
+ { .soc_id = "r8a77961", },
{ .soc_id = "r8a7796", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77970", },
@@ -751,7 +753,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
+ { .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
+ { .soc_id = "r8a77961", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77990", },
{ .soc_id = "r8a77995", },
@@ -963,12 +967,18 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774e1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a7795",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a7796",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a77961",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a77965",
.data = &ipmmu_features_rcar_gen3,
}, {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3d8a63555c25..3615cd6241c4 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
- ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+ ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
@@ -593,14 +593,14 @@ static void insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
- struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+ struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
- dev->archdata.iommu = master;
+ dev_iommu_priv_set(dev, master);
}
for (sid = 0; sid < master->num_mids; sid++)
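Several drivers in this section (msm, and later omap, rockchip, tegra and mtk_iommu_v1) make the same mechanical conversion: per-device driver state moves from dev->archdata.iommu to the generic dev_iommu_priv_{set,get}() accessors. A condensed sketch of the before/after shape; "struct example_data" and the helper names are hypothetical.

#include <linux/iommu.h>

struct example_data;	/* hypothetical per-device driver state */

static void example_store(struct device *dev, struct example_data *data)
{
	dev_iommu_priv_set(dev, data);	/* was: dev->archdata.iommu = data; */
}

static struct example_data *example_lookup(struct device *dev)
{
	return dev_iommu_priv_get(dev);	/* was: dev->archdata.iommu */
}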
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2be96f1cdbd2..785b228d39a6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -37,12 +37,18 @@
#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028
-#define REG_MMU_INV_SEL 0x038
+#define REG_MMU_INV_SEL_GEN2 0x02c
+#define REG_MMU_INV_SEL_GEN1 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)
-#define REG_MMU_STANDARD_AXI_MODE 0x048
+#define REG_MMU_MISC_CTRL 0x048
+#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
+#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))
+
#define REG_MMU_DCM_DIS 0x050
+#define REG_MMU_WR_LEN_CTRL 0x054
+#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
@@ -88,10 +94,12 @@
#define REG_MMU1_INVLD_PA 0x148
#define REG_MMU0_INT_ID 0x150
#define REG_MMU1_INT_ID 0x154
+#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
-#define MTK_PROTECT_PA_ALIGN 128
+#define MTK_PROTECT_PA_ALIGN 256
/*
* Get the local arbiter ID and the portid within the larb arbiter
@@ -100,6 +108,18 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+#define HAS_4GB_MODE BIT(0)
+/* HW will use the EMI clock if the "bclk" clock is absent. */
+#define HAS_BCLK BIT(1)
+#define HAS_VLD_PA_RNG BIT(2)
+#define RESET_AXI BIT(3)
+#define OUT_ORDER_WR_EN BIT(4)
+#define HAS_SUB_COMM BIT(5)
+#define WR_THROT_EN BIT(6)
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
+ ((((pdata)->flags) & (_x)) == (_x))
+
struct mtk_iommu_domain {
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -165,7 +185,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
for_each_m4u(data) {
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + REG_MMU_INV_SEL);
+ data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
}
@@ -182,7 +202,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
for_each_m4u(data) {
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + REG_MMU_INV_SEL);
+ data->base + data->plat_data->inv_sel_reg);
writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
writel_relaxed(iova + size - 1,
@@ -226,7 +246,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
struct mtk_iommu_data *data = dev_id;
struct mtk_iommu_domain *dom = data->m4u_dom;
u32 int_state, regval, fault_iova, fault_pa;
- unsigned int fault_larb, fault_port;
+ unsigned int fault_larb, fault_port, sub_comm = 0;
bool layer, write;
/* Read error info from registers */
@@ -242,10 +262,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
}
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
- fault_larb = F_MMU_INT_ID_LARB_ID(regval);
fault_port = F_MMU_INT_ID_PORT_ID(regval);
-
- fault_larb = data->plat_data->larbid_remap[fault_larb];
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
+ fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+ sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+ } else {
+ fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+ }
+ fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -397,7 +421,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
- return dom->iop->map(dom->iop, iova, paddr, size, prot);
+ return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -532,11 +556,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return ret;
}
- if (data->plat_data->m4u_plat == M4U_MT8173)
+ if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
- else
- regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+ } else {
+ regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+ regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+ }
writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
regval = F_L2_MULIT_HIT_EN |
@@ -563,7 +589,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
upper_32_bits(data->protect_base);
writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
- if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
+ if (data->enable_4GB &&
+ MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@@ -572,9 +599,23 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
}
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
+ /* write command throttling mode */
+ regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+ regval &= ~F_MMU_WR_THROT_DIS_MASK;
+ writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+ }
- if (data->plat_data->reset_axi)
- writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
+ /* The register is called STANDARD_AXI_MODE in this case */
+ regval = 0;
+ } else {
+ regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
+ regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
+ regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
+ }
+ writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
@@ -616,7 +657,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
/* Whether the current dram is over 4GB */
data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
- if (!data->plat_data->has_4gb_mode)
+ if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
data->enable_4GB = false;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -629,7 +670,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (data->irq < 0)
return data->irq;
- if (data->plat_data->has_bclk) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
data->bclk = devm_clk_get(dev, "bclk");
if (IS_ERR(data->bclk))
return PTR_ERR(data->bclk);
@@ -718,8 +759,8 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
struct mtk_iommu_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
- reg->standard_axi_mode = readl_relaxed(base +
- REG_MMU_STANDARD_AXI_MODE);
+ reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
+ reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
@@ -743,8 +784,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
- writel_relaxed(reg->standard_axi_mode,
- base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
+ writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
@@ -763,28 +804,35 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
static const struct mtk_iommu_plat_data mt2712_data = {
.m4u_plat = M4U_MT2712,
- .has_4gb_mode = true,
- .has_bclk = true,
- .has_vld_pa_rng = true,
- .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+ .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
+};
+
+static const struct mtk_iommu_plat_data mt6779_data = {
+ .m4u_plat = M4U_MT6779,
+ .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
- .has_4gb_mode = true,
- .has_bclk = true,
- .reset_axi = true,
- .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
static const struct mtk_iommu_plat_data mt8183_data = {
.m4u_plat = M4U_MT8183,
- .reset_axi = true,
- .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+ .flags = RESET_AXI,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+ { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}
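One detail of the new flag scheme worth noting: because of the "== (_x)" comparison, MTK_IOMMU_HAS_FLAG() tests that every bit of the mask is present, not just any bit. An illustrative check, assuming the macro and flag definitions added above (the helper name is hypothetical):

#include "mtk_iommu.h"

/* True only when both HAS_BCLK and HAS_4GB_MODE are set in ->flags;
 * a single matching bit is not enough. */
static bool example_needs_bclk_and_4gb(const struct mtk_iommu_plat_data *pdata)
{
	return MTK_IOMMU_HAS_FLAG(pdata, HAS_BCLK | HAS_4GB_MODE);
}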
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index ea949a324e33..122925dbe547 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -15,34 +15,39 @@
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
+#define MTK_LARB_COM_MAX 8
+#define MTK_LARB_SUBCOM_MAX 4
+
struct mtk_iommu_suspend_reg {
- u32 standard_axi_mode;
+ union {
+ u32 standard_axi_mode;/* v1 */
+ u32 misc_ctrl;/* v2 */
+ };
u32 dcm_dis;
u32 ctrl_reg;
u32 int_control0;
u32 int_main_control;
u32 ivrp_paddr;
u32 vld_pa_rng;
+ u32 wr_len_ctrl;
};
enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
+ M4U_MT6779,
M4U_MT8173,
M4U_MT8183,
};
struct mtk_iommu_plat_data {
enum mtk_iommu_plat m4u_plat;
- bool has_4gb_mode;
-
- /* HW will use the EMI clock if there isn't the "bclk". */
- bool has_bclk;
- bool has_vld_pa_rng;
- bool reset_axi;
- unsigned char larbid_remap[MTK_LARB_NR_MAX];
+ u32 flags;
+ u32 inv_sel_reg;
+ unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
struct mtk_iommu_domain;
@@ -62,6 +67,8 @@ struct mtk_iommu_data {
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
+ struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
+
struct list_head list;
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index c9d79cff4d17..82ddfe9170d4 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -269,7 +269,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
int ret;
/* Only allow the domain created internally. */
- mtk_mapping = data->dev->archdata.iommu;
+ mtk_mapping = data->mapping;
if (mtk_mapping->domain != domain)
return 0;
@@ -369,7 +369,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
- struct device *m4udev;
int ret;
if (args->args_count != 1) {
@@ -401,8 +400,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
return ret;
data = dev_iommu_priv_get(dev);
- m4udev = data->dev;
- mtk_mapping = m4udev->archdata.iommu;
+ mtk_mapping = data->mapping;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
@@ -410,7 +408,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
- m4udev->archdata.iommu = mtk_mapping;
+ data->mapping = mtk_mapping;
}
return 0;
@@ -459,7 +457,7 @@ static void mtk_iommu_probe_finalize(struct device *dev)
int err;
data = dev_iommu_priv_get(dev);
- mtk_mapping = data->dev->archdata.iommu;
+ mtk_mapping = data->mapping;
err = arm_iommu_attach_device(dev, mtk_mapping);
if (err)
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 8e19bfa94121..a99afb5d9011 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
mutex_lock(&iommu_debug_lock);
bytes = omap_iommu_dump_ctx(obj, p, count);
+ if (bytes < 0)
+ goto err;
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+err:
mutex_unlock(&iommu_debug_lock);
kfree(buf);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index c8282cc212cb..71f29c0927fc 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -3,7 +3,7 @@
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
- * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
@@ -71,7 +71,7 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
**/
void omap_iommu_save_ctx(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
**/
void omap_iommu_restore_ctx(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@@ -1398,7 +1398,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
static int omap_iommu_count(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
int count = 0;
while (arch_data->iommu_dev) {
@@ -1459,8 +1459,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
int ret = 0;
@@ -1524,7 +1524,7 @@ out:
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_device *iommu = omap_domain->iommus;
struct omap_iommu *oiommu;
int i;
@@ -1650,7 +1650,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
int num_iommus, i;
/*
- * Allocate the archdata iommu structure for DT-based devices.
+ * Allocate the per-device iommu structure for DT-based devices.
*
* TODO: Simplify this when removing non-DT support completely from the
* IOMMU users.
@@ -1698,7 +1698,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
of_node_put(np);
}
- dev->archdata.iommu = arch_data;
+ dev_iommu_priv_set(dev, arch_data);
/*
* use the first IOMMU alone for the sysfs device linking.
@@ -1712,19 +1712,19 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
static void omap_iommu_release_device(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
if (!dev->of_node || !arch_data)
return;
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
kfree(arch_data);
}
static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct iommu_group *group = ERR_PTR(-EINVAL);
if (!arch_data)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index d25c2486ca07..e5d86b7177de 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -836,7 +836,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- struct rk_iommudata *data = dev->archdata.iommu;
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
return data ? data->iommu : NULL;
}
@@ -1059,7 +1059,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
struct rk_iommudata *data;
struct rk_iommu *iommu;
- data = dev->archdata.iommu;
+ data = dev_iommu_priv_get(dev);
if (!data)
return ERR_PTR(-ENODEV);
@@ -1073,7 +1073,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
static void rk_iommu_release_device(struct device *dev)
{
- struct rk_iommudata *data = dev->archdata.iommu;
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
device_link_del(data->link);
}
@@ -1100,7 +1100,7 @@ static int rk_iommu_of_xlate(struct device *dev,
iommu_dev = of_find_device_by_node(args->np);
data->iommu = platform_get_drvdata(iommu_dev);
- dev->archdata.iommu = data;
+ dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 5fbdff6ff41a..fac720273889 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -113,8 +113,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
if (gart->active_domain && gart->active_domain != domain) {
ret = -EBUSY;
- } else if (dev->archdata.iommu != domain) {
- dev->archdata.iommu = domain;
+ } else if (dev_iommu_priv_get(dev) != domain) {
+ dev_iommu_priv_set(dev, domain);
gart->active_domain = domain;
gart->active_devices++;
}
@@ -131,8 +131,8 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
spin_lock(&gart->dom_lock);
- if (dev->archdata.iommu == domain) {
- dev->archdata.iommu = NULL;
+ if (dev_iommu_priv_get(dev) == domain) {
+ dev_iommu_priv_set(dev, NULL);
if (--gart->active_devices == 0)
gart->active_domain = NULL;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7426b7666e2b..124c8848ab7e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -465,7 +465,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct tegra_smmu *smmu = dev->archdata.iommu;
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
@@ -780,7 +780,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
* supported by the Linux kernel, so abort after the
* first match.
*/
- dev->archdata.iommu = smmu;
+ dev_iommu_priv_set(dev, smmu);
break;
}
@@ -797,7 +797,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
static void tegra_smmu_release_device(struct device *dev)
{
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
}
static const struct tegra_smmu_group_soc *
@@ -856,7 +856,7 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct tegra_smmu *smmu = dev->archdata.iommu;
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct iommu_group *group;
group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index f6f07489a9aa..2bfdd5734844 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -440,7 +440,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
default:
dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
mem->subtype);
- /* Fall-through */
+ fallthrough;
case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
region = iommu_alloc_resv_region(start, size, 0,
IOMMU_RESV_RESERVED);
@@ -1010,8 +1010,8 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
return ret;
- virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
- &viommu->pgsize_bitmap);
+ virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
+ &viommu->pgsize_bitmap);
if (!viommu->pgsize_bitmap) {
ret = -EINVAL;
@@ -1022,25 +1022,25 @@ static int viommu_probe(struct virtio_device *vdev)
viommu->last_domain = ~0U;
/* Optional features */
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
- struct virtio_iommu_config, input_range.start,
- &input_start);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+ struct virtio_iommu_config, input_range.start,
+ &input_start);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
- struct virtio_iommu_config, input_range.end,
- &input_end);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+ struct virtio_iommu_config, input_range.end,
+ &input_end);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
- struct virtio_iommu_config, domain_range.start,
- &viommu->first_domain);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.start,
+ &viommu->first_domain);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
- struct virtio_iommu_config, domain_range.end,
- &viommu->last_domain);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.end,
+ &viommu->last_domain);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
- struct virtio_iommu_config, probe_size,
- &viommu->probe_size);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
+ struct virtio_iommu_config, probe_size,
+ &viommu->probe_size);
viommu->geometry = (struct iommu_domain_geometry) {
.aperture_start = input_start,
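The virtio-iommu hunks switch the config-space reads to the *_le accessors, which treat the fields as little-endian regardless of guest endianness, as virtio 1.0 defines config space to be. A minimal sketch of the accessor usage, mirroring the probe code above; the wrapper function is hypothetical.

#include <linux/virtio_config.h>
#include <uapi/linux/virtio_iommu.h>

static void example_read_config(struct virtio_device *vdev, u64 *pgsize_bitmap)
{
	/* Reads the little-endian field and converts it to CPU order. */
	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			pgsize_bitmap);
}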
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bb70b7177f94..bfc9719dbcdc 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -425,7 +425,7 @@ config GOLDFISH_PIC
for Goldfish based virtual platforms.
config QCOM_PDC
- tristate "QCOM PDC"
+ bool "QCOM PDC"
depends on ARCH_QCOM
select IRQ_DOMAIN_HIERARCHY
help
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 95f097448f97..548de7538632 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2737,7 +2737,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
switch (gpsz) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case GIC_PAGE_SIZE_4K:
psz = SZ_4K;
break;
@@ -2832,7 +2832,7 @@ static int allocate_vpe_l1_table(void)
switch (gpsz) {
default:
gpsz = GIC_PAGE_SIZE_4K;
- /* fall through */
+ fallthrough;
case GIC_PAGE_SIZE_4K:
psz = SZ_4K;
break;
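A large share of the hunks in this and the surrounding files are the same mechanical substitution: fall-through comments become the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which compilers and static checkers can verify, unlike a comment. A minimal sketch of the intended shape; the function and helpers are hypothetical.

static void example_setup(void) { }
static void example_run(void) { }

static void example_switch(int mode)
{
	switch (mode) {
	case 0:
		example_setup();
		fallthrough;	/* deliberate fall-through, now compiler-checked */
	case 1:
		example_run();
		break;
	default:
		break;
	}
}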
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 324f280ff606..850842f27bee 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -965,10 +965,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
- /* Fall through */
+ fallthrough;
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
- /* Fall through */
+ fallthrough;
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
@@ -982,10 +982,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
- /* Fall through */
+ fallthrough;
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
- /* Fall through */
+ fallthrough;
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4f74c15c4755..7031ef44de4f 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -259,7 +259,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
case 4:
writel_relaxed(~0, reg + GPC_IMR1_CORE2);
writel_relaxed(~0, reg + GPC_IMR1_CORE3);
- /* fall through */
+ fallthrough;
case 2:
writel_relaxed(~0, reg + GPC_IMR1_CORE0);
writel_relaxed(~0, reg + GPC_IMR1_CORE1);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 9f3da4260ca6..b61a8901ef72 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -125,7 +125,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- if (request_irq(parent_irq, intc_cascade, 0,
+ if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND,
"SoC intc cascade interrupt", NULL))
pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index aacfa012c082..215885962bb0 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -480,7 +480,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
- /* fall-through */
+ fallthrough;
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 62a61275aaa3..69ba8ce3c178 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -295,6 +295,4 @@ out_free:
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_cirq)
-IRQCHIP_MATCH("mediatek,mtk-cirq", mtk_cirq_of_init)
-IRQCHIP_PLATFORM_DRIVER_END(mtk_cirq)
+IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7299c5ab4d10..6ff98b87e5c0 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -231,6 +231,4 @@ out_free_chip:
kfree(chip_data);
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_sysirq)
-IRQCHIP_MATCH("mediatek,mt6577-sysirq", mtk_sysirq_of_init)
-IRQCHIP_PLATFORM_DRIVER_END(mtk_sysirq)
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 03a36be757d8..0c2c61db26b4 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -416,6 +416,16 @@ static void stm32_irq_ack(struct irq_data *d)
irq_gc_unlock(gc);
}
+/* directly set the target bit without reading first. */
+static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
@@ -449,9 +459,9 @@ static void stm32_exti_h_eoi(struct irq_data *d)
raw_spin_lock(&chip_data->rlock);
- stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
if (stm32_bank->fpr_ofst != UNDEF_REG)
- stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
raw_spin_unlock(&chip_data->rlock);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index b7cc5d6580d8..d4e97605456b 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
@@ -83,6 +84,7 @@ struct ti_sci_inta_vint_desc {
* @vint_mutex: Mutex to protect vint_list
* @base: Base address of the memory mapped IO registers
* @pdev: Pointer to platform device.
+ * @ti_sci_id: TI-SCI device identifier
*/
struct ti_sci_inta_irq_domain {
const struct ti_sci_handle *sci;
@@ -93,6 +95,7 @@ struct ti_sci_inta_irq_domain {
struct mutex vint_mutex;
void __iomem *base;
struct platform_device *pdev;
+ u32 ti_sci_id;
};
#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
@@ -129,6 +132,37 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
}
/**
+ * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @inta: IRQ domain corresponding to Interrupt Aggregator
+ * @vint_id:	VINT ID within the above Interrupt Aggregator
+ *
+ * Return the parent's hardware irq number if a translation is available,
+ * else -ENOENT.
+ */
+static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
+ u16 vint_id)
+{
+ struct device_node *np = dev_of_node(&inta->pdev->dev);
+ u32 base, parent_base, size;
+ const __be32 *range;
+ int len;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return vint_id;
+
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ parent_base = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= vint_id && vint_id < base + size)
+ return vint_id - base + parent_base;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
* @domain: IRQ domain corresponding to Interrupt Aggregator
*
@@ -139,30 +173,52 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
struct ti_sci_inta_irq_domain *inta = domain->host_data;
struct ti_sci_inta_vint_desc *vint_desc;
struct irq_fwspec parent_fwspec;
+ struct device_node *parent_node;
unsigned int parent_virq;
- u16 vint_id;
+ u16 vint_id, p_hwirq;
+ int ret;
vint_id = ti_sci_get_free_resource(inta->vint);
if (vint_id == TI_SCI_RESOURCE_NULL)
return ERR_PTR(-EINVAL);
+ p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
+ if (p_hwirq < 0) {
+ ret = p_hwirq;
+ goto free_vint;
+ }
+
vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
- if (!vint_desc)
- return ERR_PTR(-ENOMEM);
+ if (!vint_desc) {
+ ret = -ENOMEM;
+ goto free_vint;
+ }
vint_desc->domain = domain;
vint_desc->vint_id = vint_id;
INIT_LIST_HEAD(&vint_desc->list);
- parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev)));
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = inta->pdev->id;
- parent_fwspec.param[1] = vint_desc->vint_id;
+ parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
+ parent_fwspec.fwnode = of_node_to_fwnode(parent_node);
+
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = p_hwirq - 32;
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+ } else {
+ /* Parent is Interrupt Router */
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = p_hwirq;
+ }
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
if (parent_virq == 0) {
- kfree(vint_desc);
- return ERR_PTR(-EINVAL);
+ dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
+ ret = -EINVAL;
+ goto free_vint_desc;
+
}
vint_desc->parent_virq = parent_virq;
@@ -171,6 +227,11 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
ti_sci_inta_irq_handler, vint_desc);
return vint_desc;
+free_vint_desc:
+ kfree(vint_desc);
+free_vint:
+ ti_sci_release_resource(inta->vint, vint_id);
+ return ERR_PTR(ret);
}
/**
@@ -202,7 +263,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta
err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
dev_id, dev_index,
- inta->pdev->id,
+ inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
free_bit);
@@ -299,7 +360,7 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
HWIRQ_TO_DEVID(hwirq),
HWIRQ_TO_IRQID(hwirq),
- inta->pdev->id,
+ inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
event_desc->vint_bit);
@@ -547,21 +608,21 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id);
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
if (ret) {
dev_err(dev, "missing 'ti,sci-dev-id' property\n");
return -EINVAL;
}
- inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
- "ti,sci-rm-range-vint");
+ inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IA_VINT);
if (IS_ERR(inta->vint)) {
dev_err(dev, "VINT resource allocation failed\n");
return PTR_ERR(inta->vint);
}
- inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
- "ti,sci-rm-range-global-event");
+ inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
if (IS_ERR(inta->global_event)) {
dev_err(dev, "Global event resource allocation failed\n");
return PTR_ERR(inta->global_event);
@@ -592,6 +653,8 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&inta->vint_list);
mutex_init(&inta->vint_mutex);
+ dev_info(dev, "Interrupt Aggregator domain %d created\n", pdev->id);
+
return 0;
}
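The new ti_sci_inta_xlate_irq() above walks the "ti,interrupt-ranges" property as <local-base parent-base size> triplets. As a worked example with hypothetical numbers: given ti,interrupt-ranges = <0 360 32>, a local VINT id of 8 falls inside [0, 32) and maps to parent hwirq 360 + (8 - 0) = 368. The same arithmetic as a standalone sketch:

#include <linux/errno.h>
#include <linux/types.h>

/* One range triplet, hypothetical values: base 0, parent_base 360, size 32. */
static int example_xlate(u32 vint_id)
{
	u32 base = 0, parent_base = 360, size = 32;

	if (base <= vint_id && vint_id < base + size)
		return vint_id - base + parent_base;	/* e.g. 8 -> 368 */

	return -ENOENT;
}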
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 5ea148faf2ab..cbc1758228d9 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -17,29 +17,20 @@
#include <linux/of_irq.h>
#include <linux/soc/ti/ti_sci_protocol.h>
-#define TI_SCI_DEV_ID_MASK 0xffff
-#define TI_SCI_DEV_ID_SHIFT 16
-#define TI_SCI_IRQ_ID_MASK 0xffff
-#define TI_SCI_IRQ_ID_SHIFT 0
-#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
- (TI_SCI_DEV_ID_MASK))
-#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK))
-#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \
- TI_SCI_DEV_ID_SHIFT) | \
- ((index) & TI_SCI_IRQ_ID_MASK))
-
/**
* struct ti_sci_intr_irq_domain - Structure representing a TISCI based
* Interrupt Router IRQ domain.
* @sci: Pointer to TISCI handle
- * @dst_irq: TISCI resource pointer representing GIC irq controller.
- * @dst_id: TISCI device ID of the GIC irq controller.
+ * @out_irqs: TISCI resource pointer representing INTR irqs.
+ * @dev: Struct device pointer.
+ * @ti_sci_id: TI-SCI device identifier
* @type: Specifies the trigger type supported by this Interrupt Router
*/
struct ti_sci_intr_irq_domain {
const struct ti_sci_handle *sci;
- struct ti_sci_resource *dst_irq;
- u32 dst_id;
+ struct ti_sci_resource *out_irqs;
+ struct device *dev;
+ u32 ti_sci_id;
u32 type;
};
@@ -70,16 +61,45 @@ static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain,
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
- if (fwspec->param_count != 2)
+ if (fwspec->param_count != 1)
return -EINVAL;
- *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]);
+ *hwirq = fwspec->param[0];
*type = intr->type;
return 0;
}
/**
+ * ti_sci_intr_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @intr: IRQ domain corresponding to Interrupt Router
+ * @irq: Hardware irq corresponding to the above irq domain
+ *
+ * Return parent irq number if translation is available else -ENOENT.
+ */
+static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq)
+{
+ struct device_node *np = dev_of_node(intr->dev);
+ u32 base, pbase, size, len;
+ const __be32 *range;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return irq;
+
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ pbase = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= irq && irq < base + size)
+ return irq - base + pbase;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
* @domain: Domain to which the irqs belong
* @virq: Linux virtual IRQ to be freed.
@@ -89,66 +109,76 @@ static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
- struct irq_data *data, *parent_data;
- u16 dev_id, irq_index;
+ struct irq_data *data;
+ int out_irq;
- parent_data = irq_domain_get_irq_data(domain->parent, virq);
data = irq_domain_get_irq_data(domain, virq);
- irq_index = HWIRQ_TO_IRQID(data->hwirq);
- dev_id = HWIRQ_TO_DEVID(data->hwirq);
+ out_irq = (uintptr_t)data->chip_data;
- intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index,
- intr->dst_id, parent_data->hwirq);
- ti_sci_release_resource(intr->dst_irq, parent_data->hwirq);
+ intr->sci->ops.rm_irq_ops.free_irq(intr->sci,
+ intr->ti_sci_id, data->hwirq,
+ intr->ti_sci_id, out_irq);
+ ti_sci_release_resource(intr->out_irqs, out_irq);
irq_domain_free_irqs_parent(domain, virq, 1);
irq_domain_reset_irq_data(data);
}
/**
- * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ
+ * ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ
* @domain: Pointer to the interrupt router IRQ domain
* @virq: Corresponding Linux virtual IRQ number
* @hwirq: Corresponding hwirq for the IRQ within this IRQ domain
*
- * Returns 0 if all went well else appropriate error pointer.
+ * Returns the parent irq number if all went well, else an appropriate
+ * error code.
*/
-static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain,
- unsigned int virq, u32 hwirq)
+static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
+ unsigned int virq, u32 hwirq)
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct device_node *parent_node;
struct irq_fwspec fwspec;
- u16 dev_id, irq_index;
- u16 dst_irq;
- int err;
-
- dev_id = HWIRQ_TO_DEVID(hwirq);
- irq_index = HWIRQ_TO_IRQID(hwirq);
+ u16 out_irq, p_hwirq;
+ int err = 0;
- dst_irq = ti_sci_get_free_resource(intr->dst_irq);
- if (dst_irq == TI_SCI_RESOURCE_NULL)
+ out_irq = ti_sci_get_free_resource(intr->out_irqs);
+ if (out_irq == TI_SCI_RESOURCE_NULL)
return -EINVAL;
- fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 3;
- fwspec.param[0] = 0; /* SPI */
- fwspec.param[1] = dst_irq - 32; /* SPI offset */
- fwspec.param[2] = intr->type;
+ p_hwirq = ti_sci_intr_xlate_irq(intr, out_irq);
+ if (p_hwirq < 0)
+ goto err_irqs;
+
+ parent_node = of_irq_find_parent(dev_of_node(intr->dev));
+ fwspec.fwnode = of_node_to_fwnode(parent_node);
+
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = p_hwirq - 32; /* SPI offset */
+ fwspec.param[2] = intr->type;
+ } else {
+ /* Parent is Interrupt Router */
+ fwspec.param_count = 1;
+ fwspec.param[0] = p_hwirq;
+ }
err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (err)
goto err_irqs;
- err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index,
- intr->dst_id, dst_irq);
+ err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci,
+ intr->ti_sci_id, hwirq,
+ intr->ti_sci_id, out_irq);
if (err)
goto err_msg;
- return 0;
+ return p_hwirq;
err_msg:
irq_domain_free_irqs_parent(domain, virq, 1);
err_irqs:
- ti_sci_release_resource(intr->dst_irq, dst_irq);
+ ti_sci_release_resource(intr->out_irqs, out_irq);
return err;
}
@@ -168,18 +198,19 @@ static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain,
struct irq_fwspec *fwspec = data;
unsigned long hwirq;
unsigned int flags;
- int err;
+ int err, p_hwirq;
err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
if (err)
return err;
- err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq);
- if (err)
- return err;
+ p_hwirq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq);
+ if (p_hwirq < 0)
+ return p_hwirq;
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &ti_sci_intr_irq_chip, NULL);
+ &ti_sci_intr_irq_chip,
+ (void *)(uintptr_t)p_hwirq);
return 0;
}
@@ -214,6 +245,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
if (!intr)
return -ENOMEM;
+ intr->dev = dev;
ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
&intr->type);
if (ret) {
@@ -230,19 +262,19 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id",
- &intr->dst_id);
+ ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id",
+ &intr->ti_sci_id);
if (ret) {
- dev_err(dev, "missing 'ti,sci-dst-id' property\n");
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
return -EINVAL;
}
- intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev,
- intr->dst_id,
- "ti,sci-rm-range-girq");
- if (IS_ERR(intr->dst_irq)) {
+ intr->out_irqs = devm_ti_sci_get_resource(intr->sci, dev,
+ intr->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IR_OUTPUT);
+ if (IS_ERR(intr->out_irqs)) {
dev_err(dev, "Destination irq resource allocation failed\n");
- return PTR_ERR(intr->dst_irq);
+ return PTR_ERR(intr->out_irqs);
}
domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
@@ -252,6 +284,8 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ dev_info(dev, "Interrupt Router %d domain created\n", intr->ti_sci_id);
+
return 0;
}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index bc235db8a4c5..e46036374227 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -455,7 +455,7 @@ static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
- /* fall through */
+ fallthrough;
case AMBA_VENDOR_ARM:
break;
}
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1bb0e36c2bf3..d2341153e181 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -52,7 +52,7 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller. The actual initialization callback of this
* interrupt controller can check for specific domains as necessary.
*/
- if (par_np && !irq_find_matching_host(np, DOMAIN_BUS_ANY))
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
return -EPROBE_DEFER;
return irq_init_cb(np, par_np);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index c1c5dfad57cc..6ae9e1f0819d 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -11,11 +11,9 @@
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -432,8 +430,4 @@ fail:
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
-IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
-IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
-MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
-MODULE_LICENSE("GPL v2");
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index ecc1ef6c386d..f68569bfef7a 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -348,7 +348,7 @@ modehdlc(struct bchannel *bch, int protocol)
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
- /* fall through */
+ fallthrough;
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
index b0d772340e16..448ded8f9d24 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
@@ -121,7 +121,6 @@ setup_embedded(struct hfc_multi *hc, struct hm_map *m)
case HFC_IO_MODE_EMBSD:
test_and_set_bit(HFC_CHIP_EMBSD, &hc->chip);
hc->slots = 128; /* required */
- /* fall through */
hc->HFC_outb = HFC_outb_embsd;
hc->HFC_inb = HFC_inb_embsd;
hc->HFC_inw = HFC_inw_embsd;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 904a4f4c5ff9..56bd2e9db6ed 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1280,7 +1280,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
- /* fall through */
+ fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 4274906f8654..70061991915a 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -695,7 +695,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
switch (protocol) {
case (-1): /* used for init */
bch->state = -1;
- /* fall through */
+ fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0; /* already in idle state */
diff --git a/drivers/isdn/hardware/mISDN/isdnhdlc.c b/drivers/isdn/hardware/mISDN/isdnhdlc.c
index 9fea16ed3dd8..985367e6711d 100644
--- a/drivers/isdn/hardware/mISDN/isdnhdlc.c
+++ b/drivers/isdn/hardware/mISDN/isdnhdlc.c
@@ -397,7 +397,7 @@ int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, u16 slen,
dsize--;
break;
}
- /* fall through */
+ fallthrough;
case HDLC_SENDFLAG_ONE:
if (hdlc->bit_shift == 8) {
hdlc->cbin = hdlc->ffvalue >>
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index f4cb29766888..a16c7a2a7f3d 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -875,7 +875,7 @@ release_card(struct inf_hw *card) {
release_card(card->sc[i]);
card->sc[i] = NULL;
}
- /* fall through */
+ fallthrough;
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 11e8c7d8b6e8..56943409b60d 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -957,7 +957,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
- /* fall through */
+ fallthrough;
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1163,7 +1163,7 @@ setup_pump(struct isar_ch *ch) {
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
- /* fall through */
+ fallthrough;
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
@@ -1255,7 +1255,7 @@ setup_iom2(struct isar_ch *ch) {
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
- /* fall through */
+ fallthrough;
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
@@ -1548,7 +1548,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
ich->is->name, hh->id);
ret = -EINVAL;
}
- /* fall through */
+ fallthrough;
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 27aa32914425..c2f76f398613 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -528,7 +528,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
- /* fall through */
+ fallthrough;
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index b413bafe93fd..97c68731406b 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -301,7 +301,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
switch (type) {
case PBLK_WRITE:
kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
- /* fall through */
+ fallthrough;
case PBLK_WRITE_INT:
pool = &pblk->w_rq_pool;
break;
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index 75482eeab2c4..994ba5cb3678 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -881,7 +881,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
}
if (hid->name[0])
break;
- /* else fall through */
+ fallthrough;
default:
pr_info("Trying to register unknown ADB device to input layer.\n");
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 23f1f41c8602..96684581a25d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -852,7 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
- /* fall through */
+ fallthrough;
case SMU_I2C_TRANSFER_STDSUB:
if (cmd->info.sublen > 3)
return -EINVAL;
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index c10a9318a4b7..53945ca5d785 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs)
/* read last_rx_curr from register once */
pdcs->last_rx_curr =
- (ioread32(&pdcs->rxregs_64->status0) &
+ (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
do {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 77fbfd52edcf..c1227bdb57e7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -608,7 +608,7 @@ static void do_journal_discard(struct cache *ca)
ca->sb.njournal_buckets;
atomic_set(&ja->discard_in_flight, DISCARD_READY);
- /* fallthrough */
+ fallthrough;
case DISCARD_READY:
if (ja->discard_idx == ja->last_idx)
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 62fb917f7a4f..ae380bc3992e 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -33,27 +33,27 @@ int bch_ ## name ## _h(const char *cp, type *res) \
case 'y': \
case 'z': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'e': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'p': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 't': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'g': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'm': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'k': \
u++; \
if (e++ == cp) \
return -EINVAL; \
- /* fall through */ \
+ fallthrough; \
case '\n': \
case '\0': \
if (*e == '\n') \
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 151aa95775be..af6d4f898e4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(cmd->bm);
+ r = PTR_ERR(cmd->bm);
+ cmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(cmd, may_format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(cmd->bm);
+ cmd->bm = NULL;
+ }
return r;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 148960721254..380386c36921 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -739,7 +739,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
struct skcipher_request *req;
struct scatterlist src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int err;
req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
@@ -936,7 +936,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
u8 *es, *ks, *data, *data2, *data_offset;
struct skcipher_request *req;
struct scatterlist *sg, *sg2, src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int i, r;
req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
@@ -1552,7 +1552,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
case -EBUSY:
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- /* fall through */
+ fallthrough;
/*
* The request is queued and processed asynchronously,
* completion function kcryptd_async_done() will be called.
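
The dm-crypt change swaps a bare struct crypto_wait on the stack for DECLARE_CRYPTO_WAIT(), which declares and fully initializes the object in one step; with the bare declaration the embedded completion started out as stack garbage. A simplified stand-in showing why the declaration macro matters (these are not the kernel's real definitions, which use COMPLETION_INITIALIZER_ONSTACK()):

#include <stdio.h>

struct completion { unsigned int done; };	/* simplified stand-in type */
struct crypto_wait { struct completion completion; int err; };

#define DECLARE_CRYPTO_WAIT(w) \
	struct crypto_wait w = { .completion = { .done = 0 }, .err = 0 }

int main(void)
{
	DECLARE_CRYPTO_WAIT(wait);	/* every field starts from a known state */

	printf("done=%u err=%d\n", wait.completion.done, wait.err);
	return 0;
}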
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 8c8d940e532e..3fc3757def55 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2487,6 +2487,7 @@ next_chunk:
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
if (ic->mode == 'B') {
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
}
@@ -2564,6 +2565,17 @@ next_chunk:
goto err;
}
+ if (ic->mode == 'B') {
+ sector_t start, end;
+ start = (range.logical_sector >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end = ((range.logical_sector + range.n_sectors) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
+ }
+
advance_and_next:
cond_resched();
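
The recalculation hunk clears only whole bitmap regions; the shift-right-then-left pair truncates a sector number down to the start of the region containing it. A small sketch of that arithmetic in isolation (the shift value here is made up; the driver derives it from log2_sectors_per_block + log2_blocks_per_bitmap_bit):

#include <stdio.h>

static unsigned long long region_start(unsigned long long sector, unsigned int shift)
{
	return (sector >> shift) << shift;	/* round down to a multiple of 2^shift */
}

int main(void)
{
	unsigned int shift = 7;			/* pretend one bitmap bit covers 128 sectors */

	printf("%llu\n", region_start(1000, shift));	/* 896 */
	printf("%llu\n", region_start(1024, shift));	/* 1024: already aligned */
	return 0;
}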
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 53645a6f474c..de4da825ade6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1287,17 +1287,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ unsigned long flags;
+
+ if (!atomic_read(&m->pg_init_in_progress))
+ goto skip;
+
+ spin_lock_irqsave(&m->lock, flags);
+ if (atomic_read(&m->pg_init_in_progress) &&
+ !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
+ spin_unlock_irqrestore(&m->lock, flags);
- if (atomic_read(&m->pg_init_in_progress))
flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
+ multipath_wait_for_pg_init_completion(m);
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ spin_lock_irqsave(&m->lock, flags);
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
}
-
+skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
flush_work(&m->process_queued_bios);
flush_work(&m->trigger_event);
@@ -1554,7 +1562,7 @@ static void pg_init_done(void *data, int errors)
case SCSI_DH_RETRY:
/* Wait before retrying. */
delay_retry = true;
- /* fall through */
+ fallthrough;
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 76b6b323bf4b..b461836b6d26 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(pmd->bm);
+ r = PTR_ERR(pmd->bm);
+ pmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(pmd, format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(pmd->bm);
+ pmd->bm = NULL;
+ }
return r;
}
@@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
pmd_write_lock_in_core(pmd);
- if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+ if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
r = __commit_transaction(pmd);
if (r < 0)
DMWARN("%s: __commit_transaction() failed, error = %d",
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 86dbe0c8b45c..6271d1e741cf 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
pfn_t pfn;
int id;
struct page **pages;
+ sector_t offset;
wc->memory_vmapped = false;
@@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
goto err1;
}
+ offset = get_start_sect(wc->ssd_dev->bdev);
+ if (offset & (PAGE_SIZE / 512 - 1)) {
+ r = -EINVAL;
+ goto err1;
+ }
+ offset >>= PAGE_SHIFT - 9;
+
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
@@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
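
The dm-writecache hunk stops assuming the cache device starts at sector 0: it reads the partition's start sector, rejects partitions that are not page aligned, and converts the sector count into the page-sized offset that dax_direct_access() expects. The arithmetic on its own, as plain C with 4 KiB pages assumed:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SHIFT   12				/* assumes 4 KiB pages */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)

static long sectors_to_dax_pgoff(unsigned long long start_sector)
{
	if (start_sector & (PAGE_SIZE / 512 - 1))	/* partition must start on a page boundary */
		return -1;
	return start_sector >> (PAGE_SHIFT - SECTOR_SHIFT);
}

int main(void)
{
	printf("%ld\n", sectors_to_dax_pgoff(2048));	/* 1 MiB in: page offset 256 */
	printf("%ld\n", sectors_to_dax_pgoff(2049));	/* misaligned: rejected */
	return 0;
}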
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32fa6499739f..fb0255d25e4b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1021,7 +1021,7 @@ static void clone_endio(struct bio *bio)
switch (r) {
case DM_ENDIO_REQUEUE:
error = BLK_STS_DM_REQUEUE;
- /*FALLTHRU*/
+ fallthrough;
case DM_ENDIO_DONE:
break;
case DM_ENDIO_INCOMPLETE:
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 6bbec89976a7..2cf973722f59 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -102,10 +102,10 @@ static int __init md_setup(char *str)
pername = "raid0";
break;
}
- /* FALL THROUGH */
+ fallthrough;
case 1: /* the first device is numeric */
str = str1;
- /* FALL THROUGH */
+ fallthrough;
case 0:
md_setup_args[ent].level = LEVEL_NONE;
pername="super-block";
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index d61b524ae440..b10c51988c8e 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1433,7 +1433,7 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
case 0:
md_bitmap_file_set_bit(bitmap, offset);
md_bitmap_count_page(&bitmap->counts, offset, 1);
- /* fall through */
+ fallthrough;
case 1:
*bmc = 2;
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 73fd50e77975..d50737ec4039 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
bitmap = get_bitmap_from_slot(mddev, i);
if (IS_ERR(bitmap)) {
pr_err("can't get bitmap from slot %d\n", i);
+ bitmap = NULL;
goto out;
}
counts = &bitmap->counts;
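
The md-cluster fix nulls the bitmap pointer before jumping to the shared error exit, because at that point the pointer holds an encoded errno (an ERR_PTR), not something the exit path may release. A userspace sketch of that convention, with deliberately simplified versions of the kernel's err.h macros and an invented get_bitmap() helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *get_bitmap(int fail)
{
	return fail ? ERR_PTR(-ENOENT) : malloc(16);
}

int main(void)
{
	void *bitmap = get_bitmap(1);

	if (IS_ERR(bitmap)) {
		fprintf(stderr, "no bitmap: %ld\n", PTR_ERR(bitmap));
		bitmap = NULL;		/* so the common exit path can free() safely */
	}
	free(bitmap);			/* free(NULL) is a no-op */
	return 0;
}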
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 15bbdc1630ed..607278207023 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -850,7 +850,13 @@ void mddev_unlock(struct mddev *mddev)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
+ if (mddev->sysfs_completed)
+ sysfs_put(mddev->sysfs_completed);
+ if (mddev->sysfs_degraded)
+ sysfs_put(mddev->sysfs_degraded);
mddev->sysfs_action = NULL;
+ mddev->sysfs_completed = NULL;
+ mddev->sysfs_degraded = NULL;
}
}
mddev->sysfs_active = 0;
@@ -4068,6 +4074,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pr_warn("md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
}
if (oldpers->sync_request != NULL &&
pers->sync_request == NULL) {
@@ -5582,14 +5590,9 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
- if (mddev->sysfs_completed)
- sysfs_put(mddev->sysfs_completed);
- if (mddev->sysfs_degraded)
- sysfs_put(mddev->sysfs_degraded);
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
-
if (mddev->gendisk)
del_gendisk(mddev->gendisk);
if (mddev->queue)
@@ -5757,8 +5760,6 @@ static int md_alloc(dev_t dev, char *name)
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
- mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
- mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
}
mddev_put(mddev);
@@ -6036,6 +6037,8 @@ int md_run(struct mddev *mddev)
pr_warn("md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 749ec268d957..54c089a50b15 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
void *p;
int r;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
@@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
struct buffer_aux *aux;
void *p;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
int dm_bm_flush(struct dm_block_manager *bm)
{
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
return dm_bufio_write_dirty_buffers(bm->bufio);
@@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
- return bm->read_only;
+ return (bm ? bm->read_only : true);
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
- bm->read_only = true;
+ if (bm)
+ bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
void dm_bm_set_read_write(struct dm_block_manager *bm)
{
- bm->read_only = false;
+ if (bm)
+ bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
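
These accessors become tolerant of a NULL handle, which pairs with the dm-cache/dm-thin hunks earlier that now leave the pointer NULL after a failed create: shared teardown code can keep calling them unconditionally. In plain C, with a simplified stand-in type, the shape of the change is simply:

#include <stdbool.h>
#include <stdio.h>

struct block_manager { bool read_only; };	/* simplified stand-in */

static bool bm_is_read_only(const struct block_manager *bm)
{
	return bm ? bm->read_only : true;	/* no manager: treat as read-only */
}

static void bm_set_read_only(struct block_manager *bm)
{
	if (bm)
		bm->read_only = true;
}

int main(void)
{
	struct block_manager bm = { .read_only = false };

	bm_set_read_only(NULL);			/* harmless no-op */
	printf("%d %d\n", bm_is_read_only(NULL), bm_is_read_only(&bm));	/* 1 0 */
	return 0;
}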
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fb8d1fb14088..225380efd1e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4083,7 +4083,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
break;
}
dev = &sh->dev[s->failed_num[0]];
- /* fall through */
+ fallthrough;
case check_state_compute_result:
sh->check_state = check_state_idle;
if (!dev)
@@ -4214,7 +4214,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
/* we have 2-disk failure */
BUG_ON(s->failed != 2);
- /* fall through */
+ fallthrough;
case check_state_compute_result:
sh->check_state = check_state_idle;
@@ -6514,9 +6514,12 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
/*
* The value should not be bigger than PAGE_SIZE. It requires to
- * be multiple of DEFAULT_STRIPE_SIZE.
+ * be multiple of DEFAULT_STRIPE_SIZE and the value should be power
+ * of two.
*/
- if (new % DEFAULT_STRIPE_SIZE != 0 || new > PAGE_SIZE || new == 0)
+ if (new % DEFAULT_STRIPE_SIZE != 0 ||
+ new > PAGE_SIZE || new == 0 ||
+ new != roundup_pow_of_two(new))
return -EINVAL;
err = mddev_lock(mddev);
@@ -7019,7 +7022,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
} else
goto abort;
spin_lock_init(&conf->device_lock);
- seqcount_init(&conf->gen_lock);
+ seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
init_waitqueue_head(&conf->wait_for_stripe);
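
The raid5 sysfs hunk tightens stripe_size validation: besides being a non-zero multiple of DEFAULT_STRIPE_SIZE and no larger than PAGE_SIZE, the value must now also be a power of two, checked above via roundup_pow_of_two(new) != new. An equivalent open-coded check, with example sizes that are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned long x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

static bool stripe_size_valid(unsigned long new_size,
			      unsigned long default_size,
			      unsigned long page_size)
{
	return new_size != 0 &&
	       new_size <= page_size &&
	       new_size % default_size == 0 &&
	       is_pow2(new_size);
}

int main(void)
{
	printf("%d\n", stripe_size_valid(4096, 4096, 4096));	/* 1 */
	printf("%d\n", stripe_size_valid(12288, 4096, 16384));	/* 0: not a power of two */
	return 0;
}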
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 7fb3b26a181a..16fc29472f5c 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -582,7 +582,7 @@ struct r5conf {
int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */
- seqcount_t gen_lock; /* lock against generation changes */
+ seqcount_spinlock_t gen_lock; /* lock against generation changes */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
long long min_offset_diff; /* minimum difference between
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 630a75e0eeb1..7607b516a7c4 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -210,7 +210,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->vdownsampling[1] = 1;
tpg->hdownsampling[1] = 1;
tpg->planes = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGB332:
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
@@ -271,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
tpg->buffers = 3;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
tpg->vdownsampling[1] = 2;
@@ -284,7 +284,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YVU422M:
tpg->buffers = 3;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV422P:
tpg->vdownsampling[1] = 1;
tpg->vdownsampling[2] = 1;
@@ -296,7 +296,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
tpg->buffers = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
tpg->vdownsampling[1] = 1;
@@ -308,7 +308,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV21M:
tpg->buffers = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
tpg->vdownsampling[1] = 2;
@@ -1275,7 +1275,7 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_ARGB444:
buf[0][offset] = (g_u_s << 4) | b_v;
@@ -1283,21 +1283,21 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_RGBX444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA444:
buf[0][offset] = (b_v << 4) | (alpha >> 4);
buf[0][offset + 1] = (r_y_h << 4) | g_u_s;
break;
case V4L2_PIX_FMT_XBGR444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR444:
buf[0][offset] = (g_u_s << 4) | r_y_h;
buf[0][offset + 1] = (alpha & 0xf0) | b_v;
break;
case V4L2_PIX_FMT_BGRX444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA444:
buf[0][offset] = (r_y_h << 4) | (alpha >> 4);
buf[0][offset + 1] = (b_v << 4) | g_u_s;
@@ -1305,7 +1305,7 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_ARGB555:
buf[0][offset] = (g_u_s << 5) | b_v;
@@ -1314,7 +1314,7 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_RGBX555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA555:
buf[0][offset] = (g_u_s << 6) | (b_v << 1) |
((alpha & 0x80) >> 7);
@@ -1322,7 +1322,7 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_XBGR555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR555:
buf[0][offset] = (g_u_s << 5) | r_y_h;
buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2)
@@ -1330,7 +1330,7 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_BGRX555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA555:
buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) |
((alpha & 0x80) >> 7);
@@ -1339,7 +1339,7 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ARGB555X:
buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3);
buf[0][offset + 1] = (g_u_s << 5) | b_v;
@@ -1366,7 +1366,7 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_HSV32:
case V4L2_PIX_FMT_XYUV32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_AYUV32:
@@ -1377,7 +1377,7 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_RGBX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA32:
buf[0][offset] = r_y_h;
buf[0][offset + 1] = g_u_s;
@@ -1388,7 +1388,7 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_VUYX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_VUYA32:
buf[0][offset] = b_v;
@@ -1398,7 +1398,7 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_BGRX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA32:
buf[0][offset] = alpha;
buf[0][offset + 1] = b_v;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 630509ecee20..89620da983ba 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -546,7 +546,7 @@ static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
h->priv->ule_sndu_type_1 = 1;
h->ts_remain -= 1;
h->from_where += 1;
- /* fallthrough */
+ fallthrough;
case 0:
h->new_ts = 1;
h->ts += TS_SZ;
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index e92542b92d34..da0ff7b44da4 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -773,7 +773,7 @@ static int bcm3510_init(struct dvb_frontend* fe)
deb_info("attempting to download firmware\n");
if ((ret = bcm3510_init_cold(st)) < 0)
return ret;
- /* fall-through */
+ fallthrough;
case JDEC_EEPROM_LOAD_WAIT:
deb_info("firmware is loaded\n");
bcm3510_check_firmware_version(st);
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index bc374750529b..08a85831e917 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1693,7 +1693,7 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
if (state->identity.p1g)
state->dc = dc_p1g_table;
- /* fall through */
+ fallthrough;
case CT_TUNER_STEP_0:
dprintk("Start/continue DC calibration for %s path\n",
(state->dc->i == 1) ? "I" : "Q");
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 0f0480d8576d..a6c2fc4586eb 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -224,7 +224,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
switch (c->hierarchy) {
case HIERARCHY_NONE:
deb_setf("hierarchy: none\n");
- /* fall through */
+ fallthrough;
case HIERARCHY_1:
deb_setf("hierarchy: alpha=1\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1);
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 0a7790c4bad3..55bee50aa871 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -276,7 +276,7 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p
if (state->version != SOC7090)
reg_1280 &= ~((1 << 11));
reg_1280 &= ~(1 << 6);
- /* fall-through */
+ fallthrough;
case DIB7000P_POWER_INTERFACE_ONLY:
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
/* TODO power up either SDIO or I2C */
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 5de016412c42..237b9d04c076 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2306,7 +2306,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16
pr_err("error %d\n", rc);
goto rw_error;
}
- /* fallthrough */
+ fallthrough;
case SIO_HI_RA_RAM_CMD_BRDCTRL:
rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0);
if (rc != 0) {
@@ -2318,7 +2318,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16
pr_err("error %d\n", rc);
goto rw_error;
}
- /* fallthrough */
+ fallthrough;
case SIO_HI_RA_RAM_CMD_NULL:
/* No parameters */
break;
@@ -2841,7 +2841,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
/* coef = 188/204 */
max_bit_rate =
(ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
- /* fall-through - as b/c Annex A/C need following settings */
+ fallthrough; /* as b/c Annex A/C need following settings */
case DRX_STANDARD_ITU_B:
rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
if (rc != 0) {
@@ -3555,8 +3555,8 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg
if (!ext_attr->has_smatx)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE_SMA: /* fall through */
- case DRX_UIO_MODE_FIRMWARE_SAW: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE_SMA:
+ case DRX_UIO_MODE_FIRMWARE_SAW:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_sma_tx_mode = uio_cfg->mode;
break;
@@ -3579,7 +3579,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg
if (!ext_attr->has_smarx)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_sma_rx_mode = uio_cfg->mode;
break;
@@ -3603,7 +3603,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg
if (!ext_attr->has_gpio)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_gpio_mode = uio_cfg->mode;
break;
@@ -3639,7 +3639,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg
}
ext_attr->uio_irqn_mode = uio_cfg->mode;
break;
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
default:
return -EINVAL;
break;
@@ -4004,31 +4004,36 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 4:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 3:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 2:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 1:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 0:
/* do nothing */
break;
@@ -4068,25 +4073,29 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 3:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 2:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 1:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 0:
/* do nothing */
break;
@@ -4791,7 +4800,7 @@ set_frequency(struct drx_demod_instance *demod,
Sound carrier is already 3Mhz above centre frequency due
to tuner setting so now add an extra shift of 1MHz... */
fm_frequency_shift = 1000;
- /*fall through */
+ fallthrough;
case DRX_STANDARD_ITU_B:
case DRX_STANDARD_NTSC:
case DRX_STANDARD_PAL_SECAM_BG:
@@ -10475,11 +10484,11 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel)
(standard == DRX_STANDARD_NTSC)) {
switch (channel->bandwidth) {
case DRX_BANDWIDTH_6MHZ:
- case DRX_BANDWIDTH_UNKNOWN: /* fall through */
+ case DRX_BANDWIDTH_UNKNOWN:
channel->bandwidth = DRX_BANDWIDTH_6MHZ;
break;
- case DRX_BANDWIDTH_8MHZ: /* fall through */
- case DRX_BANDWIDTH_7MHZ: /* fall through */
+ case DRX_BANDWIDTH_8MHZ:
+ case DRX_BANDWIDTH_7MHZ:
default:
return -EINVAL;
}
@@ -10511,10 +10520,10 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel)
}
switch (channel->constellation) {
- case DRX_CONSTELLATION_QAM16: /* fall through */
- case DRX_CONSTELLATION_QAM32: /* fall through */
- case DRX_CONSTELLATION_QAM64: /* fall through */
- case DRX_CONSTELLATION_QAM128: /* fall through */
+ case DRX_CONSTELLATION_QAM16:
+ case DRX_CONSTELLATION_QAM32:
+ case DRX_CONSTELLATION_QAM64:
+ case DRX_CONSTELLATION_QAM128:
case DRX_CONSTELLATION_QAM256:
bandwidth_temp = channel->symbolrate * bw_rolloff_factor;
bandwidth = bandwidth_temp / 100;
@@ -10628,8 +10637,8 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel)
}
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
rc = set_qam_channel(demod, channel, tuner_freq_offset);
if (rc != 0) {
@@ -10820,7 +10829,7 @@ ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_st
SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK;
break;
#endif
- case DRX_STANDARD_UNKNOWN: /* fallthrough */
+ case DRX_STANDARD_UNKNOWN:
default:
return -EIO;
}
@@ -10888,8 +10897,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard)
*/
switch (prev_standard) {
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
rc = power_down_qam(demod, false);
if (rc != 0) {
@@ -10908,7 +10917,7 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard)
case DRX_STANDARD_UNKNOWN:
/* Do nothing */
break;
- case DRX_STANDARD_AUTO: /* fallthrough */
+ case DRX_STANDARD_AUTO:
default:
return -EINVAL;
}
@@ -10921,8 +10930,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard)
switch (*standard) {
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
do {
u16 dummy;
@@ -11111,12 +11120,12 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode)
goto rw_error;
}
break;
- case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */
- case DRX_STANDARD_NTSC: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_BG:
+ case DRX_STANDARD_PAL_SECAM_DK:
+ case DRX_STANDARD_PAL_SECAM_I:
+ case DRX_STANDARD_PAL_SECAM_L:
+ case DRX_STANDARD_PAL_SECAM_LP:
+ case DRX_STANDARD_NTSC:
case DRX_STANDARD_FM:
rc = power_down_atv(demod, ext_attr->standard, true);
if (rc != 0) {
@@ -11127,7 +11136,7 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode)
case DRX_STANDARD_UNKNOWN:
/* Do nothing */
break;
- case DRX_STANDARD_AUTO: /* fallthrough */
+ case DRX_STANDARD_AUTO:
default:
return -EIO;
}
@@ -11220,8 +11229,8 @@ ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw *
ext_attr->vsb_pre_saw_cfg = *pre_saw;
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
ext_attr->qam_pre_saw_cfg = *pre_saw;
break;
@@ -11264,10 +11273,10 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain
ext_attr = (struct drxj_data *) demod->my_ext_attr;
switch (afe_gain->standard) {
- case DRX_STANDARD_8VSB: /* fallthrough */
+ case DRX_STANDARD_8VSB: fallthrough;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
#endif
/* Do nothing */
@@ -11301,8 +11310,8 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain
ext_attr->vsb_pga_cfg = gain * 13 + 140;
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
ext_attr->qam_pga_cfg = gain * 13 + 140;
break;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index fae6f3763364..45f982863904 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1512,14 +1512,14 @@ static int SetDeviceTypeId(struct drxd_state *state)
switch (deviceId) {
case 4:
state->diversity = 1;
- /* fall through */
+ fallthrough;
case 3:
case 7:
state->PGA = 1;
break;
case 6:
state->diversity = 1;
- /* fall through */
+ fallthrough;
case 5:
case 8:
break;
@@ -1966,7 +1966,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
operationMode |= SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through - try first guess DRX_FFTMODE_8K */
+ fallthrough; /* try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K;
if (state->type_A) {
@@ -2139,7 +2139,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
+ fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64;
if (state->type_A) {
@@ -2266,7 +2266,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
break;
default:
operationMode |= SC_RA_RAM_OP_AUTO_RATE__M;
- /* fall through */
+ fallthrough;
case FEC_2_3:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3;
if (state->type_A)
@@ -2301,7 +2301,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->bandwidth_hz) {
case 0:
p->bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
/* (64/7)*(8/8)*1000000 */
bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 0ae9d8c72d8d..32f9346deb3e 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1756,7 +1756,7 @@ static int setoperation_mode(struct drxk_state *state,
goto error;
state->m_operation_mode = OM_NONE;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
status = mpegts_stop(state);
if (status < 0)
@@ -1783,7 +1783,7 @@ static int setoperation_mode(struct drxk_state *state,
if (status < 0)
goto error;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
dprintk(1, ": DVB-C Annex %c\n",
(state->m_operation_mode == OM_QAM_ITU_A) ? 'A' : 'C');
@@ -2012,7 +2012,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
fec_oc_rcn_ctl_rate = 0xC00000;
static_clk = state->m_dvbt_static_clk;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
fec_oc_tmd_mode = 0x0004;
fec_oc_rcn_ctl_rate = 0xD2B4EE; /* good for >63 Mb/s */
@@ -3249,11 +3249,11 @@ static int dvbt_sc_command(struct drxk_state *state,
case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
- /* fall through - All commands using 1 parameters */
+ fallthrough; /* All commands using 1 parameters */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_USER_IO:
status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
- /* fall through - All commands using 0 parameters */
+ fallthrough; /* All commands using 0 parameters */
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
case OFDM_SC_RA_RAM_CMD_NULL:
/* Write command */
@@ -3761,7 +3761,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case TRANSMISSION_MODE_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through - try first guess DRX_FFTMODE_8K */
+ fallthrough; /* try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
@@ -3775,7 +3775,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
default:
case GUARD_INTERVAL_AUTO:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- /* fall through - try first guess DRX_GUARD_1DIV4 */
+ fallthrough; /* try first guess DRX_GUARD_1DIV4 */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
@@ -3798,7 +3798,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
/* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
/* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
- /* fall through */
+ fallthrough;
case HIERARCHY_1:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
break;
@@ -3816,7 +3816,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case QAM_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
+ fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
@@ -3841,7 +3841,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
WR16(dev_addr, OFDM_EC_SB_PRIOR__A,
OFDM_EC_SB_PRIOR_HI));
break;
- case DRX_PRIORITY_UNKNOWN: /* fall through */
+ case DRX_PRIORITY_UNKNOWN:
default:
status = -EINVAL;
goto error;
@@ -3859,7 +3859,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case FEC_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- /* fall through - try first guess DRX_CODERATE_2DIV3 */
+ fallthrough; /* try first guess DRX_CODERATE_2DIV3 */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
@@ -3893,7 +3893,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
switch (state->props.bandwidth_hz) {
case 0:
state->props.bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index d3c330e035c4..722576f1732a 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -768,7 +768,7 @@ static int lgdt3306a_set_if(struct lgdt3306a_state *state,
default:
pr_warn("IF=%d KHz is not supported, 3250 assumed\n",
if_freq_khz);
- /* fallthrough */
+ fallthrough;
case 3250: /* 3.25Mhz */
nco1 = 0x34;
nco2 = 0x00;
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 881897583cf2..399d5c519027 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -201,7 +201,7 @@ static int mt352_set_parameters(struct dvb_frontend *fe)
if (op->hierarchy == HIERARCHY_AUTO ||
op->hierarchy == HIERARCHY_NONE)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 290b9eab099f..4404ace82981 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -739,7 +739,7 @@ static int get_frontend(struct dvb_frontend *fe,
default:
break;
}
- /* Fall through */
+ fallthrough;
case SYS_DVBS:
switch ((enum MXL_HYDRA_MODULATION_E)
reg_data[DMD_MODULATION_SCHEME_ADDR]) {
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 35a3e47497c2..24de1b115158 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -482,7 +482,7 @@ start:
switch (reg&0xff) {
case 0x06:
if (reg & 0x1000) usK = 3 << 24;
- /* fall through */
+ fallthrough;
case 0x43: /* QAM64 */
c = 150204167;
break;
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 89402916d301..c1334d7eb442 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -398,7 +398,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
default:
dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
__func__, KHz);
- /* fall through */
+ fallthrough;
case 5380:
case 44000:
s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 2fc6aea580f9..2a2cf20a73d6 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -201,7 +201,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe)
break;
default:
c->bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
zl10353_single_write(fe, MCLK_RATIO, 0x75);
zl10353_single_write(fe, 0x64, 0x36);
@@ -258,7 +258,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe)
if (c->hierarchy == HIERARCHY_AUTO ||
c->hierarchy == HIERARCHY_NONE)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 570a4a09c387..03eee606af91 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -2209,7 +2209,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
- /* fall-through */
+ fallthrough;
case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
@@ -2370,7 +2370,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
- /* fall-through */
+ fallthrough;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 7cabb9e9ffe2..92fe051c672f 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1310,7 +1310,7 @@ static void dvb_input_detach(struct ddb_input *input)
dvb_unregister_frontend(dvb->fe2);
if (dvb->fe)
dvb_unregister_frontend(dvb->fe);
- /* fallthrough */
+ fallthrough;
case 0x30:
dvb_module_release(dvb->i2c_client[0]);
dvb->i2c_client[0] = NULL;
@@ -1321,22 +1321,22 @@ static void dvb_input_detach(struct ddb_input *input)
dvb_frontend_detach(dvb->fe);
dvb->fe = NULL;
dvb->fe2 = NULL;
- /* fallthrough */
+ fallthrough;
case 0x20:
dvb_net_release(&dvb->dvbnet);
- /* fallthrough */
+ fallthrough;
case 0x12:
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&dvb->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&dvb->mem_frontend);
- /* fallthrough */
+ fallthrough;
case 0x11:
dvb_dmxdev_release(&dvb->dmxdev);
- /* fallthrough */
+ fallthrough;
case 0x10:
dvb_dmx_release(&dvb->demux);
- /* fallthrough */
+ fallthrough;
case 0x01:
break;
}
@@ -1559,7 +1559,7 @@ static int dvb_input_attach(struct ddb_input *input)
osc24 = 0;
else
osc24 = 1;
- /* fall-through */
+ fallthrough;
case DDB_TUNER_DVBCT2_SONY_P:
case DDB_TUNER_DVBC2T2_SONY_P:
case DDB_TUNER_ISDBT_SONY_P:
@@ -1575,7 +1575,7 @@ static int dvb_input_attach(struct ddb_input *input)
break;
case DDB_TUNER_DVBC2T2I_SONY:
osc24 = 1;
- /* fall-through */
+ fallthrough;
case DDB_TUNER_DVBCT2_SONY:
case DDB_TUNER_DVBC2T2_SONY:
case DDB_TUNER_ISDBT_SONY:
@@ -2036,7 +2036,7 @@ static int ddb_port_attach(struct ddb_port *port)
ret = ddb_ci_attach(port, ci_bitrate);
if (ret < 0)
break;
- /* fall-through */
+ fallthrough;
case DDB_PORT_LOOP:
ret = dvb_register_device(port->dvb[0].adap,
&port->dvb[0].dev,
@@ -2432,7 +2432,8 @@ void ddb_ports_init(struct ddb *dev)
ddb_input_init(port, 4 + i, 1, 4 + i);
ddb_output_init(port, i);
break;
- } /* fallthrough */
+ }
+ fallthrough;
case DDB_OCTOPUS:
ddb_input_init(port, 2 * i, 0, 2 * i);
ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1);
@@ -3417,7 +3418,7 @@ int ddb_exit_ddbridge(int stage, int error)
default:
case 2:
destroy_workqueue(ddb_wq);
- /* fall-through */
+ fallthrough;
case 1:
ddb_class_destroy();
break;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 7fb3b1853b87..8944e4bd4638 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -952,7 +952,7 @@ static int meyeioc_sync(struct file *file, void *fh, int *i)
mutex_unlock(&meye.lock);
return -EINTR;
}
- /* fall through */
+ fallthrough;
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index bf36b1e22b63..45228f4f6fc6 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -637,7 +637,7 @@ static void gpioirq(unsigned long cookie)
iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
break;
}
- /* fall through */
+ fallthrough;
case DATA_TS_RECORD:
case DATA_PES_RECORD:
@@ -2176,7 +2176,7 @@ static int frontend_init(struct av7110 *av7110)
break;
}
}
- /* fall-thru */
+ fallthrough;
case 0x0008: // Hauppauge/TT DVB-T
// Grundig 29504-401
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index e8a8ec5405e2..93ca31e38ddd 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -1107,7 +1107,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
break;
case OSD_SetRow:
dc->y1 = dc->y0;
- /* fall through */
+ fallthrough;
case OSD_SetBlock:
ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data);
break;
diff --git a/drivers/media/pci/ttpci/av7110_ipack.c b/drivers/media/pci/ttpci/av7110_ipack.c
index ec528fae7333..30330ed01ce8 100644
--- a/drivers/media/pci/ttpci/av7110_ipack.c
+++ b/drivers/media/pci/ttpci/av7110_ipack.c
@@ -182,7 +182,7 @@ int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p)
case DSM_CC_STREAM :
case ISO13522_STREAM:
p->done = 1;
- /* fall through */
+ fallthrough;
case PRIVATE_STREAM1:
case VIDEO_STREAM_S ... VIDEO_STREAM_E:
case AUDIO_STREAM_S ... AUDIO_STREAM_E:
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 38cac508bd72..3cb83005cf09 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1226,7 +1226,7 @@ static void frontend_init(struct budget_av *budget_av)
* but so far it has been only confirmed for this type
*/
budget_av->reinitialise_demod = 1;
- /* fall through */
+ fallthrough;
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBS_EASYWATCH_1:
if (saa->pci->subsystem_vendor == 0x1894) {
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index 9c811272abfe..a88711a3ac7f 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -613,7 +613,7 @@ static void frontend_init(struct budget *budget)
break;
}
}
- /* fall through */
+ fallthrough;
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
{
struct dvb_frontend *fe;
@@ -638,7 +638,7 @@ static void frontend_init(struct budget *budget)
break;
}
}
- /* fall through */
+ fallthrough;
case 0x101c: { /* TT S2-1600 */
const struct stv6110x_devctl *ctl;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
index 152a713fff78..1a32266b7ddc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
@@ -9,9 +9,11 @@
#if defined(CONFIG_EXYNOS_IOMMU)
+#include <linux/iommu.h>
+
static inline bool exynos_is_iommu_available(struct device *dev)
{
- return dev->archdata.iommu != NULL;
+ return dev_iommu_priv_get(dev) != NULL;
}
#else
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 36e5f2ff4ef1..b22dc1d72527 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -220,7 +220,7 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
break;
case V4L2_PIX_FMT_RGB565:
dataswap ^= 1;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGB565X:
row_coeff = 2;
break;
@@ -802,7 +802,7 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
default:
pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
__func__, bus_fmt);
- /* fall through */
+ fallthrough;
case SH_VOU_BUS_8BIT:
return 1;
case SH_VOU_BUS_16BIT:
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index b203296de977..7e2460263882 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -105,7 +105,8 @@ static inline enum phase_diversity_modes_idx
si476x_phase_diversity_mode_to_idx(enum si476x_phase_diversity_mode mode)
{
switch (mode) {
- default: /* FALLTHROUGH */
+ default:
+ fallthrough;
case SI476X_PHDIV_DISABLED:
return SI476X_IDX_PHDIV_DISABLED;
case SI476X_PHDIV_PRIMARY_COMBINING:
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index b0303cf00387..c37315226c42 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -249,7 +249,7 @@ int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea,
index = BAND_AM;
break;
}
- /* Fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 5bb144435c16..3fe3edd80876 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -112,7 +112,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_printk:
if (perfmon_capable())
return bpf_get_trace_printk_proto();
- /* fall through */
+ fallthrough;
default:
return NULL;
}
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 95727ca910f7..0cda78f72fd8 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -64,7 +64,7 @@ static enum rc6_mode rc6_mode(struct rc6_dec *data)
case 6:
if (!data->toggle)
return RC6_MODE_6A;
- /* fall through */
+ fallthrough;
default:
return RC6_MODE_UNKNOWN;
}
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index 9fa58d92eb09..7d9a7c000c75 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -102,7 +102,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
data->state = STATE_FINISHED;
- /* Fall through */
+ fallthrough;
case STATE_FINISHED:
if (ev.pulse)
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 734a92caad8d..7b7d9fe4f945 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -756,7 +756,7 @@ static int xc5000_set_digital_params(struct dvb_frontend *fe)
if (!bw)
bw = 6000000;
/* fall to OFDM handling */
- /* fall through */
+ fallthrough;
case SYS_DMBTH:
case SYS_DVBT:
case SYS_DVBT2:
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 198ddfb8d2b1..e3234d169065 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -525,7 +525,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
case USB_SPEED_HIGH:
info("running at HIGH speed.");
break;
- case USB_SPEED_UNKNOWN: /* fall through */
+ case USB_SPEED_UNKNOWN:
default:
err("cannot handle USB speed because it is unknown.");
return -ENODEV;
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 20c50c2d042e..e747548ab286 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -165,7 +165,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_BRIGHTNESS:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_BRIGHTNESS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -176,7 +176,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_CONTRAST:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_CONTRAST:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -184,7 +184,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_SATURATION:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_SATURATION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -195,7 +195,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_GPIO_DATA:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -203,7 +203,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_GPIO_DIRECTION:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -211,7 +211,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DATA:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_MP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -219,7 +219,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION:
cmd.buffer.block_data[0] = param;
- /*fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -234,7 +234,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_FLICKER_MODES:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_FLICKER_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -281,7 +281,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_USER_MODE:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_USER_MODE:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -301,7 +301,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_WAKEUP:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_WAKEUP:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -309,7 +309,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_PW_CONTROL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_PW_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -322,7 +322,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_SYSTEM_CTRL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_SYSTEM_CTRL:
cmd.req_mode =
CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
@@ -331,7 +331,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_SYSTEM_CTRL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_SYSTEM_CTRL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -339,7 +339,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VP_EXP_MODES:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_EXP_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -347,7 +347,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_DEVICE_CONFIG:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_DEVICE_CONFIG:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -368,7 +368,7 @@ int cpia2_do_command(struct camera_data *cam,
break;
case CPIA2_CMD_SET_VC_CONTROL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -403,7 +403,7 @@ int cpia2_do_command(struct camera_data *cam,
this register can also affect
flicker modes */
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_USER_EFFECTS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -1751,7 +1751,7 @@ int cpia2_set_fps(struct camera_data *cam, int framerate)
CPIA2_VP_SENSOR_FLAGS_500) {
return -EINVAL;
}
- /* Fall through */
+ fallthrough;
case CPIA2_VP_FRAMERATE_15:
case CPIA2_VP_FRAMERATE_12_5:
case CPIA2_VP_FRAMERATE_7_5:
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index d9f953f2d088..425e470b0fd3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -996,7 +996,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
/* The DVB core will handle it */
if (dev->tuner_type == TUNER_ABSENT)
continue;
- /* fall through */
+ fallthrough;
default: /* just to shut up a gcc warning */
ent->function = MEDIA_ENT_F_CONN_RF;
break;
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 4ef3fa98d20f..52e648e2713a 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1659,14 +1659,14 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
switch (band) {
default:
- deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency);
- /* fall through */
+ deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency);
+ fallthrough;
case BAND_VHF:
- state->dib8000_ops.set_gpio(fe, 3, 0, 1);
- break;
+ state->dib8000_ops.set_gpio(fe, 3, 0, 1);
+ break;
case BAND_UHF:
- state->dib8000_ops.set_gpio(fe, 3, 0, 0);
- break;
+ state->dib8000_ops.set_gpio(fe, 3, 0, 0);
+ break;
}
ret = state->set_param_save(fe);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index f96626fe2c0b..a27a68440325 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1886,12 +1886,12 @@ static int dw2102_load_firmware(struct usb_device *dev,
switch (le16_to_cpu(dev->descriptor.idProduct)) {
case USB_PID_TEVII_S650:
dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
- /* fall through */
+ fallthrough;
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
DW210X_WRITE_MSG);
- /* fall through */
+ fallthrough;
case USB_PID_DW3101:
reset = 0;
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
@@ -1924,7 +1924,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
break;
}
}
- /* fall through */
+ fallthrough;
case 0x2101:
dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2,
DW210X_READ_MSG);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 3f3fbcd60cc6..45a2403aa039 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -2200,7 +2200,7 @@ static int check_range(enum v4l2_ctrl_type type,
case V4L2_CTRL_TYPE_BOOLEAN:
if (step != 1 || max > 1 || min < 0)
return -ERANGE;
- /* fall through */
+ fallthrough;
case V4L2_CTRL_TYPE_U8:
case V4L2_CTRL_TYPE_U16:
case V4L2_CTRL_TYPE_U32:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index a556880f225a..2a22e13a6303 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -782,7 +782,6 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
@@ -816,7 +815,6 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
p->stepwise.step.denominator);
break;
case V4L2_FRMIVAL_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 5c91fc3e65b5..606a271bdd2d 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -354,7 +354,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
break;
case VIDEOBUF_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
+ fallthrough;
case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index e154bea3cf14..c21262502581 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -239,6 +239,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
.larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask =
+ BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13),
+ /* DUMMY | IPU0 | IPU1 | CCU | MDLA */
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
.has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
@@ -260,6 +267,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.data = &mtk_smi_larb_mt2712
},
{
+ .compatible = "mediatek,mt6779-smi-larb",
+ .data = &mtk_smi_larb_mt6779
+ },
+ {
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
},
@@ -388,6 +399,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
.gen = MTK_SMI_GEN2,
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.gen = MTK_SMI_GEN2,
.has_gals = true,
@@ -409,6 +427,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.data = &mtk_smi_common_gen2,
},
{
+ .compatible = "mediatek,mt6779-smi-common",
+ .data = &mtk_smi_common_mt6779,
+ },
+ {
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,
},
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index f512cbc7a36c..ca0097664b12 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -313,7 +313,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
tick_ps *= div;
break;
case GPMC_CD_FCLK:
- /* FALL-THROUGH */
default:
break;
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index d9ee8e3dc72d..178954228631 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -371,7 +371,7 @@ again:
serial mode), then just fall through */
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_RP_RECEIVE_INT_REQ_RESULT:
intreg = mrq->data[0];
@@ -403,7 +403,7 @@ again:
case MSB_RP_RECEIVE_STATUS_REG:
msb->regs.status = *(struct ms_status_register *)mrq->data;
msb->state = MSB_RP_SEND_OOB_READ;
- /* fallthrough */
+ fallthrough;
case MSB_RP_SEND_OOB_READ:
if (!msb_read_regs(msb,
@@ -418,7 +418,7 @@ again:
msb->regs.extra_data =
*(struct ms_extra_data_register *) mrq->data;
msb->state = MSB_RP_SEND_READ_DATA;
- /* fallthrough */
+ fallthrough;
case MSB_RP_SEND_READ_DATA:
/* Skip that state if we only read the oob */
@@ -518,7 +518,7 @@ again:
msb->state = MSB_WB_RECEIVE_INT_REQ;
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_WB_RECEIVE_INT_REQ:
intreg = mrq->data[0];
@@ -549,7 +549,7 @@ again:
msb->int_polling = false;
msb->state = MSB_WB_SEND_WRITE_DATA;
- /* fallthrough */
+ fallthrough;
case MSB_WB_SEND_WRITE_DATA:
sg_init_table(sg, ARRAY_SIZE(sg));
@@ -628,7 +628,7 @@ again:
msb->state = MSB_SC_RECEIVE_INT_REQ;
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_SC_RECEIVE_INT_REQ:
intreg = mrq->data[0];
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 4a6b866b0291..e83c3ada9389 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -255,11 +255,11 @@ static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host,
case 3:
host->io_word[0] |= buf[off + 2] << 16;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 2:
host->io_word[0] |= buf[off + 1] << 8;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 1:
host->io_word[0] |= buf[off];
host->io_pos++;
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index fc35c7404429..786e46798da2 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -162,11 +162,11 @@ static unsigned int tifm_ms_write_data(struct tifm_ms *host,
case 3:
host->io_word |= buf[off + 2] << 16;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 2:
host->io_word |= buf[off + 1] << 8;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 1:
host->io_word |= buf[off];
host->io_pos++;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5216487db4fb..9903e9660a38 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -642,7 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
- /* fall through */
+ fallthrough;
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
@@ -1887,7 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
- /* fall through */
+ fallthrough;
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
@@ -1932,7 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
- /* fall through */
+ fallthrough;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 6a79cd0ebe2b..18b91ea1a353 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4326,7 +4326,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
}
}
mpt_findImVolumes(ioc);
- /* fall through */
+ fallthrough;
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 1491561d2e5c..8543f0324d5a 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -784,7 +784,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/*
* Allow non-SAS & non-NEXUS_LOSS to drop into below code
*/
- /* Fall through */
+ fallthrough;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
/* Linux handles an unsolicited DID_RESET better
@@ -881,7 +881,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
scsi_set_resid(sc, 0);
- /* Fall through */
+ fallthrough;
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a37d7d171382..33df0837ab41 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1193,18 +1193,6 @@ config MFD_SKY81452
This driver can also be built as a module. If so, the module
will be called sky81452.
-config MFD_SMSC
- bool "SMSC ECE1099 series chips"
- depends on I2C=y
- select MFD_CORE
- select REGMAP_I2C
- help
- If you say yes here you get support for the
- ece1099 chips from SMSC.
-
- To compile this driver as a module, choose M here: the
- module will be called smsc.
-
config MFD_SC27XX_PMIC
tristate "Spreadtrum SC27xx PMICs"
depends on ARCH_SPRD || COMPILE_TEST
@@ -2053,6 +2041,27 @@ config MFD_WCD934X
This driver provides common support WCD934x audio codec and its
associated Pin Controller, Soundwire Controller and Audio codec.
+config MFD_KHADAS_MCU
+ tristate "Support for Khadas System control Microcontroller"
+ depends on I2C
+ depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the Khadas System control Microcontroller interface
+ present on their VIM and Edge boards.
+
+ This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and
+ Edge boards.
+
+ It provides multiple boot control features like password check,
+ power-on options, power-off control and system FAN control on recent
+ boards.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9367a92f795a..a60e5f835283 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -127,7 +127,6 @@ obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o
obj-$(CONFIG_MCP) += mcp-core.o
obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
-obj-$(CONFIG_MFD_SMSC) += smsc-ece1099.o
obj-$(CONFIG_MCP_UCB1200_TS) += ucb1x00-ts.o
ifeq ($(CONFIG_SA1100_ASSABET),y)
@@ -262,5 +261,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o
obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
+obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 57723f116bb5..ee71ae04b5e6 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -498,7 +498,7 @@ static ssize_t ab3100_get_set_reg(struct file *file,
int i = 0;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min((ssize_t)count, (ssize_t)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index c4751fb9bc22..c393102e3a39 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -29,22 +29,22 @@
/**
* struct ab3100_otp
- * @dev containing device
- * @locked whether the OTP is locked, after locking, no more bits
+ * @dev: containing device
+ * @locked: whether the OTP is locked, after locking, no more bits
* can be changed but before locking it is still possible
* to change bits from 1->0.
- * @freq clocking frequency for the OTP, this frequency is either
+ * @freq: clocking frequency for the OTP, this frequency is either
* 32768Hz or 1MHz/30
- * @paf product activation flag, indicates whether this is a real
+ * @paf: product activation flag, indicates whether this is a real
* product (paf true) or a lab board etc (paf false)
- * @imeich if this is set it is possible to override the
+ * @imeich: if this is set it is possible to override the
* IMEI number found in the tac, fac and svn fields with
* (secured) software
- * @cid customer ID
- * @tac type allocation code of the IMEI
- * @fac final assembly code of the IMEI
- * @svn software version number of the IMEI
- * @debugfs a debugfs file used when dumping to file
+ * @cid: customer ID
+ * @tac: type allocation code of the IMEI
+ * @fac: final assembly code of the IMEI
+ * @svn: software version number of the IMEI
+ * @debugfs: a debugfs file used when dumping to file
*/
struct ab3100_otp {
struct device *dev;
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 1a9a3414d4fa..6d1bf7c3ca3b 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1801,7 +1801,7 @@ static ssize_t ab8500_hwreg_write(struct file *file,
int buf_size, ret;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min((int)count, (int)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index d2a13a547a3c..41076d121dd5 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -22,11 +22,9 @@
/**
* struct altr_sysmgr - Altera SOCFPGA System Manager
* @regmap: the regmap used for System Manager accesses.
- * @base : the base address for the System Manager
*/
struct altr_sysmgr {
struct regmap *regmap;
- resource_size_t *base;
};
static struct platform_driver altr_sysmgr_driver;
@@ -91,6 +89,9 @@ static struct regmap_config altr_sysmgr_regmap_cfg = {
* altr_sysmgr_regmap_lookup_by_phandle
* Find the sysmgr previously configured in probe() and return its regmap.
* Return: regmap if found or error if not found.
+ *
+ * @np: Pointer to device's Device Tree node
+ * @property: Device Tree property name which references the sysmgr
*/
struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
const char *property)
@@ -127,6 +128,7 @@ static int sysmgr_probe(struct platform_device *pdev)
struct regmap_config sysmgr_config = altr_sysmgr_regmap_cfg;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ void __iomem *base;
sysmgr = devm_kzalloc(dev, sizeof(*sysmgr), GFP_KERNEL);
if (!sysmgr)
@@ -139,22 +141,19 @@ static int sysmgr_probe(struct platform_device *pdev)
sysmgr_config.max_register = resource_size(res) -
sysmgr_config.reg_stride;
if (of_device_is_compatible(np, "altr,sys-mgr-s10")) {
- /* Need physical address for SMCC call */
- sysmgr->base = (resource_size_t *)res->start;
sysmgr_config.reg_read = s10_protected_reg_read;
sysmgr_config.reg_write = s10_protected_reg_write;
- regmap = devm_regmap_init(dev, NULL, sysmgr->base,
+ /* Need physical address for SMCC call */
+ regmap = devm_regmap_init(dev, NULL, (void *)res->start,
&sysmgr_config);
} else {
- sysmgr->base = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!sysmgr->base)
+ base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!base)
return -ENOMEM;
sysmgr_config.max_register = res->end - res->start - 3;
- regmap = devm_regmap_init_mmio(dev, sysmgr->base,
- &sysmgr_config);
+ regmap = devm_regmap_init_mmio(dev, base, &sysmgr_config);
}
if (IS_ERR(regmap)) {
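The kernel-doc added above documents the @np and @property arguments of altr_sysmgr_regmap_lookup_by_phandle(). For orientation only, a consumer driver resolves the handle once and then uses ordinary regmap accessors; the property name and register offset below are illustrative assumptions, not taken from this patch:

/*
 * Illustrative consumer of altr_sysmgr_regmap_lookup_by_phandle().
 * "altr,sysmgr-syscon" and offset 0x10 are placeholders for the sketch.
 */
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/altera-sysmgr.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_use_sysmgr(struct device *dev)
{
	struct regmap *sysmgr;
	u32 val;
	int ret;

	/* @np: the consumer's node, @property: phandle pointing at the sysmgr */
	sysmgr = altr_sysmgr_regmap_lookup_by_phandle(dev->of_node,
						      "altr,sysmgr-syscon");
	if (IS_ERR(sysmgr))
		return PTR_ERR(sysmgr);

	/* All further accesses go through the regmap returned by the lookup. */
	ret = regmap_read(sysmgr, 0x10, &val);
	if (ret)
		return ret;

	return regmap_write(sysmgr, 0x10, val | BIT(0));
}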
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index f73cf76d1373..000cb82023e3 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -80,7 +80,7 @@ int arizona_clk32k_disable(struct arizona *arizona)
{
mutex_lock(&arizona->clk_lock);
- BUG_ON(arizona->clk32k_ref <= 0);
+ WARN_ON(arizona->clk32k_ref <= 0);
arizona->clk32k_ref--;
@@ -1426,6 +1426,15 @@ err_irq:
arizona_irq_exit(arizona);
err_pm:
pm_runtime_disable(arizona->dev);
+
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ case ARIZONA_32KZ_MCLK2:
+ arizona_clk32k_disable(arizona);
+ break;
+ default:
+ break;
+ }
err_reset:
arizona_enable_reset(arizona);
regulator_disable(arizona->dcvdd);
@@ -1448,6 +1457,15 @@ int arizona_dev_exit(struct arizona *arizona)
regulator_disable(arizona->dcvdd);
regulator_put(arizona->dcvdd);
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ case ARIZONA_32KZ_MCLK2:
+ arizona_clk32k_disable(arizona);
+ break;
+ default:
+ break;
+ }
+
mfd_remove_devices(arizona->dev);
arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index 1fa2ec950e7d..d96f1d689e7f 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_cycle);
* atmel_smc_cs_conf_apply - apply an SMC CS conf
* @regmap: the SMC regmap
* @cs: the CS id
- * @conf the SMC CS conf to apply
+ * @conf: the SMC CS conf to apply
*
* Applies an SMC CS configuration.
* Only valid on at91sam9/avr32 SoCs.
@@ -257,7 +257,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_apply);
* @regmap: the HSMC regmap
* @cs: the CS id
* @layout: the layout of registers
- * @conf the SMC CS conf to apply
+ * @conf: the SMC CS conf to apply
*
* Applies an SMC CS configuration.
* Only valid on post-sama5 SoCs.
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 14f9df74f855..068e9c083f13 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -63,6 +63,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
{ .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID },
{ .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ },
};
@@ -74,11 +75,13 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp209", 0 },
{ "axp221", 0 },
{ "axp223", 0 },
+ { "axp803", 0 },
{ "axp806", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
{
.id = "INT33F4",
@@ -87,6 +90,7 @@ static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, axp20x_i2c_acpi_match);
+#endif
static struct i2c_driver axp20x_i2c_driver = {
.driver = {
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 32c2b912b58b..d07b43d7c761 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -24,7 +24,7 @@ static struct class cros_class = {
};
/**
- * cros_feature_to_name - CrOS feature id to name/short description.
+ * struct cros_feature_to_name - CrOS feature id to name/short description.
* @id: The feature identifier.
* @name: Device name associated with the feature id.
* @desc: Short name that will be displayed.
@@ -36,7 +36,7 @@ struct cros_feature_to_name {
};
/**
- * cros_feature_to_cells - CrOS feature id to mfd cells association.
+ * struct cros_feature_to_cells - CrOS feature id to mfd cells association.
* @id: The feature identifier.
* @mfd_cells: Pointer to the array of mfd cells that needs to be added.
* @num_cells: Number of mfd cells into the array.
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index b125f90dd375..a353d52210a9 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -160,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063)
int da9063_device_init(struct da9063 *da9063, unsigned int irq)
{
- int model, variant_id, variant_code;
int ret;
ret = da9063_clear_fault_log(da9063);
@@ -171,36 +170,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
da9063->irq_base = -1;
da9063->chip_irq = irq;
- ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
- if (ret < 0) {
- dev_err(da9063->dev, "Cannot read chip model id.\n");
- return -EIO;
- }
- if (model != PMIC_CHIP_ID_DA9063) {
- dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model);
- return -ENODEV;
- }
-
- ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id);
- if (ret < 0) {
- dev_err(da9063->dev, "Cannot read chip variant id.\n");
- return -EIO;
- }
-
- variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT;
-
- dev_info(da9063->dev,
- "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
- model, variant_id);
-
- if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) {
- dev_err(da9063->dev,
- "Cannot support variant code: 0x%02X\n", variant_code);
- return -ENODEV;
- }
-
- da9063->variant_code = variant_code;
-
ret = da9063_irq_init(da9063);
if (ret) {
dev_err(da9063->dev, "Cannot initialize interrupts.\n");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 455de74c0dd2..b8217ad303ce 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -22,12 +22,124 @@
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
+/*
+ * Raw I2C access required for just accessing chip and variant info before we
+ * know which device is present. The info read from the device using this
+ * approach is then used to select the correct regmap tables.
+ */
+
+#define DA9063_REG_PAGE_SIZE 0x100
+#define DA9063_REG_PAGED_ADDR_MASK 0xFF
+
+enum da9063_page_sel_buf_fmt {
+ DA9063_PAGE_SEL_BUF_PAGE_REG = 0,
+ DA9063_PAGE_SEL_BUF_PAGE_VAL,
+ DA9063_PAGE_SEL_BUF_SIZE,
+};
+
+enum da9063_paged_read_msgs {
+ DA9063_PAGED_READ_MSG_PAGE_SEL = 0,
+ DA9063_PAGED_READ_MSG_REG_SEL,
+ DA9063_PAGED_READ_MSG_DATA,
+ DA9063_PAGED_READ_MSG_CNT,
+};
+
+static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr,
+ u8 *buf, int count)
+{
+ struct i2c_msg xfer[DA9063_PAGED_READ_MSG_CNT];
+ u8 page_sel_buf[DA9063_PAGE_SEL_BUF_SIZE];
+ u8 page_num, paged_addr;
+ int ret;
+
+ /* Determine page info based on register address */
+ page_num = (addr / DA9063_REG_PAGE_SIZE);
+ if (page_num > 1) {
+ dev_err(&client->dev, "Invalid register address provided\n");
+ return -EINVAL;
+ }
+
+ paged_addr = (addr % DA9063_REG_PAGE_SIZE) & DA9063_REG_PAGED_ADDR_MASK;
+ page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_REG] = DA9063_REG_PAGE_CON;
+ page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_VAL] =
+ (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK;
+
+ /* Write reg address, page selection */
+ xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr;
+ xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0;
+ xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE;
+ xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf;
+
+ /* Select register address */
+ xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr;
+ xfer[DA9063_PAGED_READ_MSG_REG_SEL].flags = 0;
+ xfer[DA9063_PAGED_READ_MSG_REG_SEL].len = sizeof(paged_addr);
+ xfer[DA9063_PAGED_READ_MSG_REG_SEL].buf = &paged_addr;
+
+ /* Read data */
+ xfer[DA9063_PAGED_READ_MSG_DATA].addr = client->addr;
+ xfer[DA9063_PAGED_READ_MSG_DATA].flags = I2C_M_RD;
+ xfer[DA9063_PAGED_READ_MSG_DATA].len = count;
+ xfer[DA9063_PAGED_READ_MSG_DATA].buf = buf;
+
+ ret = i2c_transfer(client->adapter, xfer, DA9063_PAGED_READ_MSG_CNT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Paged block read failed: %d\n", ret);
+ return ret;
+ }
+
+ if (ret != DA9063_PAGED_READ_MSG_CNT) {
+ dev_err(&client->dev, "Paged block read failed to complete\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+enum {
+ DA9063_DEV_ID_REG = 0,
+ DA9063_VAR_ID_REG,
+ DA9063_CHIP_ID_REGS,
+};
+
+static int da9063_get_device_type(struct i2c_client *i2c, struct da9063 *da9063)
+{
+ u8 buf[DA9063_CHIP_ID_REGS];
+ int ret;
+
+ ret = da9063_i2c_blockreg_read(i2c, DA9063_REG_DEVICE_ID, buf,
+ DA9063_CHIP_ID_REGS);
+ if (ret)
+ return ret;
+
+ if (buf[DA9063_DEV_ID_REG] != PMIC_CHIP_ID_DA9063) {
+ dev_err(da9063->dev,
+ "Invalid chip device ID: 0x%02x\n",
+ buf[DA9063_DEV_ID_REG]);
+ return -ENODEV;
+ }
+
+ dev_info(da9063->dev,
+ "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
+ buf[DA9063_DEV_ID_REG], buf[DA9063_VAR_ID_REG]);
+
+ da9063->variant_code =
+ (buf[DA9063_VAR_ID_REG] & DA9063_VARIANT_ID_MRC_MASK)
+ >> DA9063_VARIANT_ID_MRC_SHIFT;
+
+ return 0;
+}
+
+/*
+ * Variant specific regmap configs
+ */
+
static const struct regmap_range da9063_ad_readable_ranges[] = {
regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D),
regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19),
- regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+ regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
};
static const struct regmap_range da9063_ad_writeable_ranges[] = {
@@ -72,7 +184,7 @@ static const struct regmap_range da9063_bb_readable_ranges[] = {
regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
- regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+ regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
};
static const struct regmap_range da9063_bb_writeable_ranges[] = {
@@ -85,7 +197,7 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = {
regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
};
-static const struct regmap_range da9063_bb_volatile_ranges[] = {
+static const struct regmap_range da9063_bb_da_volatile_ranges[] = {
regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
@@ -107,9 +219,9 @@ static const struct regmap_access_table da9063_bb_writeable_table = {
.n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges),
};
-static const struct regmap_access_table da9063_bb_volatile_table = {
- .yes_ranges = da9063_bb_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges),
+static const struct regmap_access_table da9063_bb_da_volatile_table = {
+ .yes_ranges = da9063_bb_da_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_bb_da_volatile_ranges),
};
static const struct regmap_range da9063l_bb_readable_ranges[] = {
@@ -117,7 +229,7 @@ static const struct regmap_range da9063l_bb_readable_ranges[] = {
regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
- regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+ regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
};
static const struct regmap_range da9063l_bb_writeable_ranges[] = {
@@ -129,7 +241,7 @@ static const struct regmap_range da9063l_bb_writeable_ranges[] = {
regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
};
-static const struct regmap_range da9063l_bb_volatile_ranges[] = {
+static const struct regmap_range da9063l_bb_da_volatile_ranges[] = {
regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
@@ -151,15 +263,70 @@ static const struct regmap_access_table da9063l_bb_writeable_table = {
.n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges),
};
-static const struct regmap_access_table da9063l_bb_volatile_table = {
- .yes_ranges = da9063l_bb_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges),
+static const struct regmap_access_table da9063l_bb_da_volatile_table = {
+ .yes_ranges = da9063l_bb_da_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_bb_da_volatile_ranges),
+};
+
+static const struct regmap_range da9063_da_readable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11),
+ regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
+};
+
+static const struct regmap_range da9063_da_writeable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+ regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+ regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+ regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11),
+};
+
+static const struct regmap_access_table da9063_da_readable_table = {
+ .yes_ranges = da9063_da_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_da_readable_ranges),
+};
+
+static const struct regmap_access_table da9063_da_writeable_table = {
+ .yes_ranges = da9063_da_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_da_writeable_ranges),
+};
+
+static const struct regmap_range da9063l_da_readable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11),
+ regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
+};
+
+static const struct regmap_range da9063l_da_writeable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+ regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+ regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11),
+};
+
+static const struct regmap_access_table da9063l_da_readable_table = {
+ .yes_ranges = da9063l_da_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_da_readable_ranges),
+};
+
+static const struct regmap_access_table da9063l_da_writeable_table = {
+ .yes_ranges = da9063l_da_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_da_writeable_ranges),
};
static const struct regmap_range_cfg da9063_range_cfg[] = {
{
.range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_CHIP_VARIANT,
+ .range_max = DA9063_REG_CONFIG_ID,
.selector_reg = DA9063_REG_PAGE_CON,
.selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT,
.selector_shift = DA9063_I2C_PAGE_SEL_SHIFT,
@@ -173,7 +340,7 @@ static struct regmap_config da9063_regmap_config = {
.val_bits = 8,
.ranges = da9063_range_cfg,
.num_ranges = ARRAY_SIZE(da9063_range_cfg),
- .max_register = DA9063_REG_CHIP_VARIANT,
+ .max_register = DA9063_REG_CONFIG_ID,
.cache_type = REGCACHE_RBTREE,
};
@@ -199,18 +366,72 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
da9063->chip_irq = i2c->irq;
da9063->type = id->driver_data;
- if (da9063->variant_code == PMIC_DA9063_AD) {
- da9063_regmap_config.rd_table = &da9063_ad_readable_table;
- da9063_regmap_config.wr_table = &da9063_ad_writeable_table;
- da9063_regmap_config.volatile_table = &da9063_ad_volatile_table;
- } else if (da9063->type == PMIC_TYPE_DA9063L) {
- da9063_regmap_config.rd_table = &da9063l_bb_readable_table;
- da9063_regmap_config.wr_table = &da9063l_bb_writeable_table;
- da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table;
- } else {
- da9063_regmap_config.rd_table = &da9063_bb_readable_table;
- da9063_regmap_config.wr_table = &da9063_bb_writeable_table;
- da9063_regmap_config.volatile_table = &da9063_bb_volatile_table;
+ ret = da9063_get_device_type(i2c, da9063);
+ if (ret)
+ return ret;
+
+ switch (da9063->type) {
+ case PMIC_TYPE_DA9063:
+ switch (da9063->variant_code) {
+ case PMIC_DA9063_AD:
+ da9063_regmap_config.rd_table =
+ &da9063_ad_readable_table;
+ da9063_regmap_config.wr_table =
+ &da9063_ad_writeable_table;
+ da9063_regmap_config.volatile_table =
+ &da9063_ad_volatile_table;
+ break;
+ case PMIC_DA9063_BB:
+ case PMIC_DA9063_CA:
+ da9063_regmap_config.rd_table =
+ &da9063_bb_readable_table;
+ da9063_regmap_config.wr_table =
+ &da9063_bb_writeable_table;
+ da9063_regmap_config.volatile_table =
+ &da9063_bb_da_volatile_table;
+ break;
+ case PMIC_DA9063_DA:
+ da9063_regmap_config.rd_table =
+ &da9063_da_readable_table;
+ da9063_regmap_config.wr_table =
+ &da9063_da_writeable_table;
+ da9063_regmap_config.volatile_table =
+ &da9063_bb_da_volatile_table;
+ break;
+ default:
+ dev_err(da9063->dev,
+ "Chip variant not supported for DA9063\n");
+ return -ENODEV;
+ }
+ break;
+ case PMIC_TYPE_DA9063L:
+ switch (da9063->variant_code) {
+ case PMIC_DA9063_BB:
+ case PMIC_DA9063_CA:
+ da9063_regmap_config.rd_table =
+ &da9063l_bb_readable_table;
+ da9063_regmap_config.wr_table =
+ &da9063l_bb_writeable_table;
+ da9063_regmap_config.volatile_table =
+ &da9063l_bb_da_volatile_table;
+ break;
+ case PMIC_DA9063_DA:
+ da9063_regmap_config.rd_table =
+ &da9063l_da_readable_table;
+ da9063_regmap_config.wr_table =
+ &da9063l_da_writeable_table;
+ da9063_regmap_config.volatile_table =
+ &da9063l_bb_da_volatile_table;
+ break;
+ default:
+ dev_err(da9063->dev,
+ "Chip variant not supported for DA9063L\n");
+ return -ENODEV;
+ }
+ break;
+ default:
+ dev_err(da9063->dev, "Chip type not supported\n");
+ return -ENODEV;
}
da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config);
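The comment block near the top of the da9063-i2c.c changes explains why a raw, pre-regmap paged read is needed; the address split it relies on is simple enough to show in isolation. A standalone sketch (register address 0x181 is made up for illustration):

/*
 * Standalone illustration of the page/offset split performed by
 * da9063_i2c_blockreg_read(): addresses above 0xFF live on page 1 and are
 * reached by writing the page selector first.
 */
#include <stdio.h>

#define DA9063_REG_PAGE_SIZE		0x100
#define DA9063_REG_PAGED_ADDR_MASK	0xFF

int main(void)
{
	unsigned int addr = 0x181;	/* hypothetical page-1 register */
	unsigned int page_num = addr / DA9063_REG_PAGE_SIZE;
	unsigned int paged_addr = (addr % DA9063_REG_PAGE_SIZE) &
				  DA9063_REG_PAGED_ADDR_MASK;

	printf("addr 0x%03x -> page %u, in-page offset 0x%02x\n",
	       addr, page_num, paged_addr);	/* page 1, offset 0x81 */
	return 0;
}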
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 0452b43b0423..a5983d515db0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -1515,10 +1515,10 @@ static unsigned long dsiclk_rate(u8 n)
switch (divsel) {
case PRCM_DSI_PLLOUT_SEL_PHI_4:
div *= 2;
- /* Fall through */
+ fallthrough;
case PRCM_DSI_PLLOUT_SEL_PHI_2:
div *= 2;
- /* Fall through */
+ fallthrough;
case PRCM_DSI_PLLOUT_SEL_PHI:
return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW) / div;
@@ -2276,6 +2276,8 @@ bool db8500_prcmu_is_ac_wake_requested(void)
*
* Saves the reset reason code and then sets the APE_SOFTRST register which
* fires interrupt to fw
+ *
+ * @reset_code: The reason for system reset
*/
void db8500_prcmu_system_reset(u16 reset_code)
{
@@ -3004,10 +3006,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent)
return mfd_add_devices(parent, 0, ab850x_cell, 1, NULL, 0, NULL);
}
-/**
- * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
- *
- */
static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 39276fa626d2..83e676a096dc 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
len = urb->actual_length - sizeof(struct dln2_header);
if (handle == DLN2_HANDLE_EVENT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dln2->event_cb_lock, flags);
dln2_run_event_callbacks(dln2, id, echo, data, len);
+ spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
} else {
/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
if (dln2_transfer_complete(dln2, urb, handle, echo))
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index edfc172b8607..eba88b80d969 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -5,7 +5,7 @@
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
* Copyright (c) <2013-2017> Linaro Ltd.
- * http://www.linaro.org
+ * https://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
*/
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 046222684b8b..9a58032f818a 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -201,6 +201,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+ /* EBG */
+ { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
/* GLK */
{ PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
{ PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
@@ -230,6 +233,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
+ /* TGL-H */
+ { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
/* EHL */
{ PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c
index bd94c989d232..71da861e8c27 100644
--- a/drivers/mfd/intel_soc_pmic_mrfld.c
+++ b/drivers/mfd/intel_soc_pmic_mrfld.c
@@ -91,13 +91,8 @@ static int bcove_ipc_byte_reg_write(void *context, unsigned int reg,
{
struct intel_soc_pmic *pmic = context;
u8 ipc_in = val;
- int ret;
- ret = intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in);
- if (ret)
- return ret;
-
- return 0;
+ return intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in);
}
static const struct regmap_config bcove_regmap_config = {
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index af764bc87d7c..761b4ef3a381 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -136,7 +136,7 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
if (val & IQS620_PROX_SETTINGS_4_SAR_EN)
iqs62x->ui_sel = IQS62X_UI_SAR1;
- /* fall through */
+ fallthrough;
case IQS621_PROD_NUM:
ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
@@ -470,7 +470,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context)
case IQS62X_EVENT_UI_LO:
event_data.ui_data = get_unaligned_le16(&event_map[i]);
- /* fall through */
+ fallthrough;
case IQS62X_EVENT_UI_HI:
case IQS62X_EVENT_NONE:
@@ -491,7 +491,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context)
case IQS62X_EVENT_HYST:
event_map[i] <<= iqs62x->dev_desc->hyst_shift;
- /* fall through */
+ fallthrough;
case IQS62X_EVENT_WHEEL:
case IQS62X_EVENT_HALL:
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f48e21d8b97c..52bec01149e5 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -79,39 +79,31 @@ enum kempld_cells {
KEMPLD_UART,
};
-static const struct mfd_cell kempld_devs[] = {
- [KEMPLD_I2C] = {
- .name = "kempld-i2c",
- },
- [KEMPLD_WDT] = {
- .name = "kempld-wdt",
- },
- [KEMPLD_GPIO] = {
- .name = "kempld-gpio",
- },
- [KEMPLD_UART] = {
- .name = "kempld-uart",
- },
+static const char *kempld_dev_names[] = {
+ [KEMPLD_I2C] = "kempld-i2c",
+ [KEMPLD_WDT] = "kempld-wdt",
+ [KEMPLD_GPIO] = "kempld-gpio",
+ [KEMPLD_UART] = "kempld-uart",
};
-#define KEMPLD_MAX_DEVS ARRAY_SIZE(kempld_devs)
+#define KEMPLD_MAX_DEVS ARRAY_SIZE(kempld_dev_names)
static int kempld_register_cells_generic(struct kempld_device_data *pld)
{
- struct mfd_cell devs[KEMPLD_MAX_DEVS];
+ struct mfd_cell devs[KEMPLD_MAX_DEVS] = {};
int i = 0;
if (pld->feature_mask & KEMPLD_FEATURE_BIT_I2C)
- devs[i++] = kempld_devs[KEMPLD_I2C];
+ devs[i++].name = kempld_dev_names[KEMPLD_I2C];
if (pld->feature_mask & KEMPLD_FEATURE_BIT_WATCHDOG)
- devs[i++] = kempld_devs[KEMPLD_WDT];
+ devs[i++].name = kempld_dev_names[KEMPLD_WDT];
if (pld->feature_mask & KEMPLD_FEATURE_BIT_GPIO)
- devs[i++] = kempld_devs[KEMPLD_GPIO];
+ devs[i++].name = kempld_dev_names[KEMPLD_GPIO];
if (pld->feature_mask & KEMPLD_FEATURE_MASK_UART)
- devs[i++] = kempld_devs[KEMPLD_UART];
+ devs[i++].name = kempld_dev_names[KEMPLD_UART];
return mfd_add_devices(pld->dev, -1, devs, i, NULL, 0, NULL);
}
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
new file mode 100644
index 000000000000..44d5bb462dab
--- /dev/null
+++ b/drivers/mfd/khadas-mcu.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Khadas System control Microcontroller
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/khadas-mcu.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg)
+{
+ if (reg >= KHADAS_MCU_USER_DATA_0_REG &&
+ reg < KHADAS_MCU_PWR_OFF_CMD_REG)
+ return true;
+
+ switch (reg) {
+ case KHADAS_MCU_PWR_OFF_CMD_REG:
+ case KHADAS_MCU_PASSWD_START_REG:
+ case KHADAS_MCU_CHECK_VEN_PASSWD_REG:
+ case KHADAS_MCU_CHECK_USER_PASSWD_REG:
+ case KHADAS_MCU_WOL_INIT_START_REG:
+ case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case KHADAS_MCU_PASSWD_VEN_0_REG:
+ case KHADAS_MCU_PASSWD_VEN_1_REG:
+ case KHADAS_MCU_PASSWD_VEN_2_REG:
+ case KHADAS_MCU_PASSWD_VEN_3_REG:
+ case KHADAS_MCU_PASSWD_VEN_4_REG:
+ case KHADAS_MCU_PASSWD_VEN_5_REG:
+ case KHADAS_MCU_MAC_0_REG:
+ case KHADAS_MCU_MAC_1_REG:
+ case KHADAS_MCU_MAC_2_REG:
+ case KHADAS_MCU_MAC_3_REG:
+ case KHADAS_MCU_MAC_4_REG:
+ case KHADAS_MCU_MAC_5_REG:
+ case KHADAS_MCU_USID_0_REG:
+ case KHADAS_MCU_USID_1_REG:
+ case KHADAS_MCU_USID_2_REG:
+ case KHADAS_MCU_USID_3_REG:
+ case KHADAS_MCU_USID_4_REG:
+ case KHADAS_MCU_USID_5_REG:
+ case KHADAS_MCU_VERSION_0_REG:
+ case KHADAS_MCU_VERSION_1_REG:
+ case KHADAS_MCU_DEVICE_NO_0_REG:
+ case KHADAS_MCU_DEVICE_NO_1_REG:
+ case KHADAS_MCU_FACTORY_TEST_REG:
+ case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config khadas_mcu_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
+ .volatile_reg = khadas_mcu_reg_volatile,
+ .writeable_reg = khadas_mcu_reg_writeable,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct mfd_cell khadas_mcu_fan_cells[] = {
+ /* VIM1/2 Rev13+ and VIM3 only */
+ { .name = "khadas-mcu-fan-ctrl", },
+};
+
+static struct mfd_cell khadas_mcu_cells[] = {
+ { .name = "khadas-mcu-user-mem", },
+};
+
+static int khadas_mcu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct khadas_mcu *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ddata);
+
+ ddata->dev = dev;
+
+ ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ khadas_mcu_cells,
+ ARRAY_SIZE(khadas_mcu_cells),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ if (of_find_property(dev->of_node, "#cooling-cells", NULL))
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ khadas_mcu_fan_cells,
+ ARRAY_SIZE(khadas_mcu_fan_cells),
+ NULL, 0, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id khadas_mcu_of_match[] = {
+ { .compatible = "khadas,mcu", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, khadas_mcu_of_match);
+
+static struct i2c_driver khadas_mcu_driver = {
+ .driver = {
+ .name = "khadas-mcu-core",
+ .of_match_table = of_match_ptr(khadas_mcu_of_match),
+ },
+ .probe = khadas_mcu_probe,
+};
+module_i2c_driver(khadas_mcu_driver);
+
+MODULE_DESCRIPTION("Khadas MCU core driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL v2");
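The new core driver registers its cells and stores a struct khadas_mcu (with ->dev and ->regmap populated, as in the probe above) as the I2C client's driver data. A hypothetical sketch of how a child cell could reach the shared regmap through its parent; the probe and driver names are invented and the real fan-control logic is omitted:

/*
 * Hypothetical child-cell probe: the parent "khadas-mcu-core" driver stores
 * a struct khadas_mcu as its I2C client drvdata, so a sub-device can fetch
 * it from its parent device.
 */
#include <linux/mfd/khadas-mcu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int khadas_mcu_fan_example_probe(struct platform_device *pdev)
{
	struct khadas_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
	unsigned int status;
	int ret;

	/* Read the fan status/control register declared in the regmap config. */
	ret = regmap_read(mcu->regmap, KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
			  &status);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "fan status/ctrl: 0x%02x\n", status);
	return 0;
}

static struct platform_driver khadas_mcu_fan_example_driver = {
	.probe = khadas_mcu_fan_example_probe,
	.driver = {
		.name = "khadas-mcu-fan-ctrl",	/* matches the cell name above */
	},
};
module_platform_driver(khadas_mcu_fan_example_driver);

MODULE_LICENSE("GPL v2");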
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
index 34fba06ec705..2537dfade51c 100644
--- a/drivers/mfd/lm3533-ctrlbank.c
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -17,7 +17,6 @@
#define LM3533_MAX_CURRENT_MAX 29800
#define LM3533_MAX_CURRENT_STEP 800
-#define LM3533_BRIGHTNESS_MAX 255
#define LM3533_PWM_MAX 0x3f
#define LM3533_REG_PWM_BASE 0x14
@@ -89,41 +88,33 @@ int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
}
EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
-#define lm3533_ctrlbank_set(_name, _NAME) \
-int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \
-{ \
- u8 reg; \
- int ret; \
- \
- if (val > LM3533_##_NAME##_MAX) \
- return -EINVAL; \
- \
- reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
- ret = lm3533_write(cb->lm3533, reg, val); \
- if (ret) \
- dev_err(cb->dev, "failed to set " #_name "\n"); \
- \
- return ret; \
-} \
-EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
-
-#define lm3533_ctrlbank_get(_name, _NAME) \
-int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \
-{ \
- u8 reg; \
- int ret; \
- \
- reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
- ret = lm3533_read(cb->lm3533, reg, val); \
- if (ret) \
- dev_err(cb->dev, "failed to get " #_name "\n"); \
- \
- return ret; \
-} \
-EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
-
-lm3533_ctrlbank_set(brightness, BRIGHTNESS);
-lm3533_ctrlbank_get(brightness, BRIGHTNESS);
+int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val)
+{
+ u8 reg;
+ int ret;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
+ ret = lm3533_write(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to set brightness\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_brightness);
+
+int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val)
+{
+ u8 reg;
+ int ret;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
+ ret = lm3533_read(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to get brightness\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_brightness);
/*
* PWM-input control mask:
@@ -135,9 +126,36 @@ lm3533_ctrlbank_get(brightness, BRIGHTNESS);
* bit 1 - PWM-input enabled in Zone 0
* bit 0 - PWM-input enabled
*/
-lm3533_ctrlbank_set(pwm, PWM);
-lm3533_ctrlbank_get(pwm, PWM);
+int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val)
+{
+ u8 reg;
+ int ret;
+
+ if (val > LM3533_PWM_MAX)
+ return -EINVAL;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
+ ret = lm3533_write(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to set PWM mask\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_pwm);
+
+int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val)
+{
+ u8 reg;
+ int ret;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
+ ret = lm3533_read(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to get PWM mask\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_pwm);
MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
MODULE_DESCRIPTION("LM3533 Control Bank interface");
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index 873c608e6a5d..858c9e0a49a4 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Keerthy <j-keerthy@ti.com>
*
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 4a5c8ade4ae0..2268be9113f1 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Keerthy <j-keerthy@ti.com>
*/
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 7e0835cb062b..8a8d733fdce5 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -44,7 +44,10 @@ static const char * const madera_core_supplies[] = {
};
static const struct mfd_cell madera_ldo1_devs[] = {
- { .name = "madera-ldo1" },
+ {
+ .name = "madera-ldo1",
+ .level = MFD_DEP_LEVEL_HIGH,
+ },
};
static const char * const cs47l15_supplies[] = {
@@ -55,8 +58,8 @@ static const char * const cs47l15_supplies[] = {
static const struct mfd_cell cs47l15_devs[] = {
{ .name = "madera-pinctrl", },
- { .name = "madera-irq" },
- { .name = "madera-gpio" },
+ { .name = "madera-irq", },
+ { .name = "madera-gpio", },
{
.name = "madera-extcon",
.parent_supplies = cs47l15_supplies,
@@ -108,7 +111,7 @@ static const char * const cs47l85_supplies[] = {
static const struct mfd_cell cs47l85_devs[] = {
{ .name = "madera-pinctrl", },
{ .name = "madera-irq", },
- { .name = "madera-micsupp" },
+ { .name = "madera-micsupp", },
{ .name = "madera-gpio", },
{
.name = "madera-extcon",
@@ -155,10 +158,10 @@ static const char * const cs47l92_supplies[] = {
};
static const struct mfd_cell cs47l92_devs[] = {
- { .name = "madera-pinctrl" },
+ { .name = "madera-pinctrl", },
{ .name = "madera-irq", },
{ .name = "madera-micsupp", },
- { .name = "madera-gpio" },
+ { .name = "madera-gpio", },
{
.name = "madera-extcon",
.parent_supplies = cs47l92_supplies,
@@ -743,18 +746,22 @@ int madera_dev_exit(struct madera *madera)
/* Prevent any IRQs being serviced while we clean up */
disable_irq(madera->irq);
- /*
- * DCVDD could be supplied by a child node, we must disable it before
- * removing the children, and prevent PM runtime from turning it back on
- */
- pm_runtime_disable(madera->dev);
+ pm_runtime_get_sync(madera->dev);
- clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+ mfd_remove_devices(madera->dev);
+
+ pm_runtime_disable(madera->dev);
regulator_disable(madera->dcvdd);
regulator_put(madera->dcvdd);
- mfd_remove_devices(madera->dev);
+ mfd_remove_devices_late(madera->dev);
+
+ pm_runtime_set_suspended(madera->dev);
+ pm_runtime_put_noidle(madera->dev);
+
+ clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+
madera_enable_hard_reset(madera);
regulator_bulk_disable(madera->num_core_supplies,
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 6b965eb034b6..7df5b9ba5855 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -88,7 +88,6 @@ static int madera_i2c_probe(struct i2c_client *i2c,
if (!madera)
return -ENOMEM;
-
madera->regmap = devm_regmap_init_i2c(i2c, regmap_16bit_config);
if (IS_ERR(madera->regmap)) {
ret = PTR_ERR(madera->regmap);
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index fd8864cafd25..be185e9d5f16 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(maxim_charger_currents);
int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
unsigned int min_ua, unsigned int max_ua, u8 *dst)
{
- unsigned int current_bits = 0xf;
+ unsigned int current_bits;
if (min_ua > max_ua)
return -EINVAL;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f5a73af60dd4..fc00aaccb5f7 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
@@ -17,8 +18,17 @@
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/regulator/consumer.h>
+static LIST_HEAD(mfd_of_node_list);
+
+struct mfd_of_node_entry {
+ struct list_head list;
+ struct device *dev;
+ struct device_node *np;
+};
+
static struct device_type mfd_dev_type = {
.name = "mfd_device",
};
@@ -107,6 +117,51 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
}
#endif
+static int mfd_match_of_node_to_dev(struct platform_device *pdev,
+ struct device_node *np,
+ const struct mfd_cell *cell)
+{
+#if IS_ENABLED(CONFIG_OF)
+ struct mfd_of_node_entry *of_entry;
+ const __be32 *reg;
+ u64 of_node_addr;
+
+ /* Skip if OF node has previously been allocated to a device */
+ list_for_each_entry(of_entry, &mfd_of_node_list, list)
+ if (of_entry->np == np)
+ return -EAGAIN;
+
+ if (!cell->use_of_reg)
+ /* No of_reg defined - allocate first free compatible match */
+ goto allocate_of_node;
+
+ /* We only care about each node's first defined address */
+ reg = of_get_address(np, 0, NULL, NULL);
+ if (!reg)
+ /* OF node does not contain a 'reg' property to match to */
+ return -EAGAIN;
+
+ of_node_addr = of_read_number(reg, of_n_addr_cells(np));
+
+ if (cell->of_reg != of_node_addr)
+ /* No match */
+ return -EAGAIN;
+
+allocate_of_node:
+ of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL);
+ if (!of_entry)
+ return -ENOMEM;
+
+ of_entry->dev = &pdev->dev;
+ of_entry->np = np;
+ list_add_tail(&of_entry->list, &mfd_of_node_list);
+
+ pdev->dev.of_node = np;
+ pdev->dev.fwnode = &np->fwnode;
+#endif
+ return 0;
+}
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -115,6 +170,7 @@ static int mfd_add_device(struct device *parent, int id,
struct resource *res;
struct platform_device *pdev;
struct device_node *np = NULL;
+ struct mfd_of_node_entry *of_entry, *tmp;
int ret = -ENOMEM;
int platform_id;
int r;
@@ -149,19 +205,28 @@ static int mfd_add_device(struct device *parent, int id,
if (ret < 0)
goto fail_res;
- if (parent->of_node && cell->of_compatible) {
+ if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
+ /* Ignore 'disabled' devices error free */
if (!of_device_is_available(np)) {
- /* Ignore disabled devices error free */
ret = 0;
goto fail_alias;
}
- pdev->dev.of_node = np;
- pdev->dev.fwnode = &np->fwnode;
+
+ ret = mfd_match_of_node_to_dev(pdev, np, cell);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret)
+ goto fail_alias;
+
break;
}
}
+
+ if (!pdev->dev.of_node)
+ pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ cell->name, platform_id);
}
mfd_acpi_add_device(cell, pdev);
@@ -170,13 +235,13 @@ static int mfd_add_device(struct device *parent, int id,
ret = platform_device_add_data(pdev,
cell->platform_data, cell->pdata_size);
if (ret)
- goto fail_alias;
+ goto fail_of_entry;
}
if (cell->properties) {
ret = platform_device_add_properties(pdev, cell->properties);
if (ret)
- goto fail_alias;
+ goto fail_of_entry;
}
for (r = 0; r < cell->num_resources; r++) {
@@ -213,18 +278,18 @@ static int mfd_add_device(struct device *parent, int id,
if (has_acpi_companion(&pdev->dev)) {
ret = acpi_check_resource_conflict(&res[r]);
if (ret)
- goto fail_alias;
+ goto fail_of_entry;
}
}
}
ret = platform_device_add_resources(pdev, res, cell->num_resources);
if (ret)
- goto fail_alias;
+ goto fail_of_entry;
ret = platform_device_add(pdev);
if (ret)
- goto fail_alias;
+ goto fail_of_entry;
if (cell->pm_runtime_no_callbacks)
pm_runtime_no_callbacks(&pdev->dev);
@@ -233,6 +298,12 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
+fail_of_entry:
+ list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
+ if (of_entry->dev == &pdev->dev) {
+ list_del(&of_entry->list);
+ kfree(of_entry);
+ }
fail_alias:
regulator_bulk_unregister_supply_alias(&pdev->dev,
cell->parent_supplies,
@@ -287,6 +358,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
{
struct platform_device *pdev;
const struct mfd_cell *cell;
+ int *level = data;
if (dev->type != &mfd_dev_type)
return 0;
@@ -294,6 +366,9 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
pdev = to_platform_device(dev);
cell = mfd_get_cell(pdev);
+ if (level && cell->level > *level)
+ return 0;
+
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
@@ -301,9 +376,19 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
return 0;
}
+void mfd_remove_devices_late(struct device *parent)
+{
+ int level = MFD_DEP_LEVEL_HIGH;
+
+ device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
+}
+EXPORT_SYMBOL(mfd_remove_devices_late);
+
void mfd_remove_devices(struct device *parent)
{
- device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn);
+ int level = MFD_DEP_LEVEL_NORMAL;
+
+ device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices);
@@ -318,6 +403,16 @@ static void devm_mfd_dev_release(struct device *dev, void *res)
* Returns 0 on success or an appropriate negative error number on failure.
* All child-devices of the MFD will automatically be removed when it gets
 * unbound.
+ *
+ * @dev: Pointer to parent device.
+ * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care
+ * of device numbering, or will be added to a device's cell_id.
+ * @cells: Array of (struct mfd_cell)s describing child devices.
+ * @n_devs: Number of child devices to register.
+ * @mem_base: Parent register range resource for child devices.
+ * @irq_base: Base of the range of virtual interrupt numbers allocated for
+ * this MFD device. Unused if @domain is specified.
+ * @domain: Interrupt domain to create mappings for hardware interrupts.
*/
int devm_mfd_add_devices(struct device *dev, int id,
const struct mfd_cell *cells, int n_devs,
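
The kerneldoc added above spells out the devm_mfd_add_devices() parameters. For reference, a minimal sketch of how a parent driver typically feeds an mfd_cell table into that helper; the cell names and the probe function below are illustrative, not taken from this patch.

#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical children of an example MFD parent. */
static const struct mfd_cell example_cells[] = {
	{ .name = "example-rtc", .of_compatible = "vendor,example-rtc" },
	{ .name = "example-regulator" },
};

static int example_parent_probe(struct platform_device *pdev)
{
	/*
	 * PLATFORM_DEVID_AUTO lets the platform core pick device ids;
	 * no register range, IRQ base or IRQ domain is passed here.
	 * All children are removed automatically when the parent unbinds.
	 */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
				    example_cells, ARRAY_SIZE(example_cells),
				    NULL, 0, NULL);
}
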
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 52f38e57cdc1..2283d88adcc2 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -214,6 +214,28 @@ static const struct regmap_config cpcap_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+#ifdef CONFIG_PM_SLEEP
+static int cpcap_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ disable_irq(spi->irq);
+
+ return 0;
+}
+
+static int cpcap_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ enable_irq(spi->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpcap_pm, cpcap_suspend, cpcap_resume);
+
static const struct mfd_cell cpcap_mfd_devices[] = {
{
.name = "cpcap_adc",
@@ -313,6 +335,7 @@ static struct spi_driver cpcap_driver = {
.driver = {
.name = "cpcap-core",
.of_match_table = cpcap_of_match,
+ .pm = &cpcap_pm,
},
.probe = cpcap_probe,
};
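
The cpcap hunk above gates the PMIC interrupt across system sleep. SIMPLE_DEV_PM_OPS() fills the suspend/resume slots of a dev_pm_ops only when CONFIG_PM_SLEEP is enabled, which is why the callbacks live inside #ifdef CONFIG_PM_SLEEP (they would otherwise be defined but unused) while the macro itself stays outside. A condensed sketch of the hook-up with a hypothetical driver name:

#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* Keep the IRQ quiet while the bus may be unavailable. */
	disable_irq(to_spi_device(dev)->irq);
	return 0;
}

static int example_resume(struct device *dev)
{
	enable_irq(to_spi_device(dev)->irq);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(example_pm, example_suspend, example_resume);

static struct spi_driver example_driver = {
	.driver = {
		.name = "example-core",
		.pm = &example_pm,	/* picked up by the PM core */
	},
	/* .probe and .of_match_table elided for brevity */
};
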
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 5bef142c4835..111d11fd25aa 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -172,7 +172,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
MXS_LRADC_TOUCHSCREEN_5WIRE;
break;
}
- /* fall through - to an error message for i.MX23 */
+ fallthrough; /* to an error message for i.MX23 */
default:
dev_err(&pdev->dev,
"Unsupported number of touchscreen wires (%d)\n"
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1f4f01b02d98..2a3a240b4619 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -2,7 +2,7 @@
/**
* omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
*
- * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
* Author: Roger Quadros <rogerq@ti.com>
*/
@@ -120,7 +120,7 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg)
/*-------------------------------------------------------------------------*/
-/**
+/*
* Map 'enum usbhs_omap_port_mode' found in <linux/platform_data/usb-omap.h>
* to the device tree binding portN-mode found in
* 'Documentation/devicetree/bindings/mfd/omap-usb-host.txt'
@@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev)
i, r);
}
}
- /* Fall through - as HSIC mode needs utmi_clk */
+ fallthrough; /* as HSIC mode needs utmi_clk */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i])) {
@@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev)
if (!IS_ERR(omap->hsic480m_clk[i]))
clk_disable_unprepare(omap->hsic480m_clk[i]);
- /* Fall through - as utmi_clks were used in HSIC mode */
+ fallthrough; /* as utmi_clks were used in HSIC mode */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i]))
@@ -526,6 +526,8 @@ static const struct of_device_id usbhs_child_match_table[] = {
* usbhs_omap_probe - initialize TI-based HCDs
*
* Allocates basic resources for this USB host controller.
+ *
+ * @pdev: Pointer to this device's platform device structure
*/
static int usbhs_omap_probe(struct platform_device *pdev)
{
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 4b7f73c317e8..16fad79c73f1 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -2,7 +2,7 @@
/**
* omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI
*
- * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
* Author: Roger Quadros <rogerq@ti.com>
*/
@@ -199,6 +199,8 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
* usbtll_omap_probe - initialize TI-based HCDs
*
* Allocates basic resources for this USB host controller.
+ *
+ * @pdev: Pointer to this device's platform device structure
*/
static int usbtll_omap_probe(struct platform_device *pdev)
{
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 26c7b63e008a..545196c85b5c 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -96,7 +96,7 @@ struct rave_sp_deframer {
* @data: Buffer to store reply payload in
* @code: Expected reply code
* @ackid: Expected reply ACK ID
- * @completion: Successful reply reception completion
+ * @received: Successful reply reception completion
*/
struct rave_sp_reply {
size_t length;
@@ -270,7 +270,7 @@ static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
case RAVE_SP_ETX:
case RAVE_SP_DLE:
*dest++ = RAVE_SP_DLE;
- /* FALLTHROUGH */
+ fallthrough;
default:
*dest++ = byte;
}
@@ -541,7 +541,7 @@ static int rave_sp_receive_buf(struct serdev_device *serdev,
* deframer buffer
*/
- /* FALLTHROUGH */
+ fallthrough;
case RAVE_SP_EXPECT_ESCAPED_DATA:
if (deframer->length == sizeof(deframer->data)) {
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 232de50562f9..e25407ed3ad4 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -44,6 +44,9 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
case RN5T618_INTMON:
case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2:
case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR:
+ case RN5T618_CHGSTATE:
+ case RN5T618_CHGCTRL_IRR ... RN5T618_CHGERR_MONI:
+ case RN5T618_CONTROL ... RN5T618_CC_AVEREG0:
return true;
default:
return false;
@@ -77,7 +80,7 @@ static const struct regmap_irq_chip rc5t619_irq_chip = {
.mask_invert = true,
};
-static struct rn5t618 *rn5t618_pm_power_off;
+static struct i2c_client *rn5t618_pm_power_off;
static struct notifier_block rn5t618_restart_handler;
static int rn5t618_irq_init(struct rn5t618 *rn5t618)
@@ -110,13 +113,38 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
static void rn5t618_trigger_poweroff_sequence(bool repower)
{
+ int ret;
+
/* disable automatic repower-on */
- regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_REPCNT,
- RN5T618_REPCNT_REPWRON,
- repower ? RN5T618_REPCNT_REPWRON : 0);
+ ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_REPCNT);
+ if (ret < 0)
+ goto err;
+
+ ret &= ~RN5T618_REPCNT_REPWRON;
+ if (repower)
+ ret |= RN5T618_REPCNT_REPWRON;
+
+ ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off,
+ RN5T618_REPCNT, (u8)ret);
+ if (ret < 0)
+ goto err;
+
/* start power-off sequence */
- regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_SLPCNT,
- RN5T618_SLPCNT_SWPWROFF, RN5T618_SLPCNT_SWPWROFF);
+ ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_SLPCNT);
+ if (ret < 0)
+ goto err;
+
+ ret |= RN5T618_SLPCNT_SWPWROFF;
+
+ ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off,
+ RN5T618_SLPCNT, (u8)ret);
+ if (ret < 0)
+ goto err;
+
+ return;
+
+err:
+ dev_alert(&rn5t618_pm_power_off->dev, "Failed to shutdown (err = %d)\n", ret);
}
static void rn5t618_power_off(void)
@@ -189,7 +217,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
return ret;
}
- rn5t618_pm_power_off = priv;
+ rn5t618_pm_power_off = i2c;
if (of_device_is_system_power_controller(i2c->dev.of_node)) {
if (!pm_power_off)
pm_power_off = rn5t618_power_off;
@@ -211,9 +239,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
static int rn5t618_i2c_remove(struct i2c_client *i2c)
{
- struct rn5t618 *priv = i2c_get_clientdata(i2c);
-
- if (priv == rn5t618_pm_power_off) {
+ if (i2c == rn5t618_pm_power_off) {
rn5t618_pm_power_off = NULL;
pm_power_off = NULL;
}
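
The rn5t618 change above replaces regmap_update_bits() with raw SMBus accesses in the power-off path, open-coding the read-modify-write. The same sequence reduced to a single register; the register and bit names are placeholders, not part of this driver.

#include <linux/bits.h>
#include <linux/i2c.h>

#define EXAMPLE_REG	0x01		/* placeholder register address */
#define EXAMPLE_BIT	BIT(0)		/* placeholder bit to set */

static int example_set_bit(struct i2c_client *client)
{
	int ret;

	/* Returns the register value or a negative errno. */
	ret = i2c_smbus_read_byte_data(client, EXAMPLE_REG);
	if (ret < 0)
		return ret;

	ret |= EXAMPLE_BIT;

	return i2c_smbus_write_byte_data(client, EXAMPLE_REG, (u8)ret);
}
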
diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c
index 4a09ce9609c9..d15b3e783369 100644
--- a/drivers/mfd/si476x-cmd.c
+++ b/drivers/mfd/si476x-cmd.c
@@ -241,13 +241,13 @@ static int si476x_core_parse_and_nag_about_error(struct si476x_core *core)
/**
* si476x_core_send_command() - sends a command to si476x and waits its
* response
- * @core: si476x_device structure for the device we are
+ * @core: si476x_device structure for the device we are
* communicating with
* @command: command id
* @args: command arguments we are sending
* @argn: actual size of @args
- * @response: buffer to place the expected response from the device
- * @respn: actual size of @response
+ * @resp: buffer to place the expected response from the device
+ * @respn: actual size of @resp
* @usecs: amount of time to wait before reading the response (in
* usecs)
*
@@ -496,7 +496,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
* enable 1MOhm pulldown
* SI476X_DFS_DAUDIO - set the pin to be a part of digital
* audio interface
- * @dout - DOUT pin function configuration:
+ * @dout: - DOUT pin function configuration:
* SI476X_DOUT_NOOP - do not modify the behaviour
* SI476X_DOUT_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
* port 1
* SI476X_DOUT_I2S_INPUT - set this pin to be digital in on I2S
* port 1
- * @xout - XOUT pin function configuration:
+ * @xout: - XOUT pin function configuration:
* SI476X_XOUT_NOOP - do not modify the behaviour
* SI476X_XOUT_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -540,25 +540,25 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg);
/**
* si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND'
- * @core - device to send the command to
- * @iqclk - IQCL pin function configuration:
+ * @core: - device to send the command to
+ * @iqclk: - IQCL pin function configuration:
* SI476X_IQCLK_NOOP - do not modify the behaviour
* SI476X_IQCLK_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_IQCLK_IQ - set pin to be a part of I/Q interace
* in master mode
- * @iqfs - IQFS pin function configuration:
+ * @iqfs: - IQFS pin function configuration:
* SI476X_IQFS_NOOP - do not modify the behaviour
* SI476X_IQFS_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_IQFS_IQ - set pin to be a part of I/Q interace
* in master mode
- * @iout - IOUT pin function configuration:
+ * @iout: - IOUT pin function configuration:
* SI476X_IOUT_NOOP - do not modify the behaviour
* SI476X_IOUT_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_IOUT_OUTPUT - set pin to be I out
- * @qout - QOUT pin function configuration:
+ * @qout: - QOUT pin function configuration:
* SI476X_QOUT_NOOP - do not modify the behaviour
* SI476X_QOUT_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -590,29 +590,29 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg);
/**
* si476x_cmd_ic_link_gpo_ctl_pin_cfg - send
* 'IC_LINK_GPIO_CTL_PIN_CFG' comand to the device
- * @core - device to send the command to
- * @icin - ICIN pin function configuration:
+ * @core: - device to send the command to
+ * @icin: - ICIN pin function configuration:
* SI476X_ICIN_NOOP - do not modify the behaviour
* SI476X_ICIN_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_ICIN_GPO1_HIGH - set pin to be an output, drive it high
* SI476X_ICIN_GPO1_LOW - set pin to be an output, drive it low
* SI476X_ICIN_IC_LINK - set the pin to be a part of Inter-Chip link
- * @icip - ICIP pin function configuration:
+ * @icip: - ICIP pin function configuration:
* SI476X_ICIP_NOOP - do not modify the behaviour
* SI476X_ICIP_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_ICIP_GPO1_HIGH - set pin to be an output, drive it high
* SI476X_ICIP_GPO1_LOW - set pin to be an output, drive it low
* SI476X_ICIP_IC_LINK - set the pin to be a part of Inter-Chip link
- * @icon - ICON pin function configuration:
+ * @icon: - ICON pin function configuration:
* SI476X_ICON_NOOP - do not modify the behaviour
* SI476X_ICON_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_ICON_I2S - set the pin to be a part of audio
* interface in slave mode (DCLK)
* SI476X_ICON_IC_LINK - set the pin to be a part of Inter-Chip link
- * @icop - ICOP pin function configuration:
+ * @icop: - ICOP pin function configuration:
* SI476X_ICOP_NOOP - do not modify the behaviour
* SI476X_ICOP_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -647,8 +647,8 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg);
/**
* si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the
* device
- * @core - device to send the command to
- * @lrout - LROUT pin function configuration:
+ * @core: - device to send the command to
+ * @lrout: - LROUT pin function configuration:
* SI476X_LROUT_NOOP - do not modify the behaviour
* SI476X_LROUT_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -675,15 +675,15 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg);
/**
* si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device
- * @core - device to send the command to
- * @intb - INTB pin function configuration:
+ * @core: - device to send the command to
+ * @intb: - INTB pin function configuration:
* SI476X_INTB_NOOP - do not modify the behaviour
* SI476X_INTB_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
* SI476X_INTB_DAUDIO - set pin to be a part of digital
* audio interface in slave mode
* SI476X_INTB_IRQ - set pin to be an interrupt request line
- * @a1 - A1 pin function configuration:
+ * @a1: - A1 pin function configuration:
* SI476X_A1_NOOP - do not modify the behaviour
* SI476X_A1_TRISTATE - put the pin in tristate condition,
* enable 1MOhm pulldown
@@ -728,14 +728,10 @@ static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core,
/**
* si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
* device
- * @core - device to send the command to
- * @rsqack - if set command clears RSQINT, SNRINT, SNRLINT, RSSIHINT,
- * RSSSILINT, BLENDINT, MULTHINT and MULTLINT
- * @attune - when set the values in the status report are the values
- * that were calculated at tune
- * @cancel - abort ongoing seek/tune opertation
- * @stcack - clear the STCINT bin in status register
- * @report - all signal quality information retured by the command
+ * @core: - device to send the command to
+ * @rsqargs: - pointer to a structure containing a group of sub-args
+ * relevant to sending the RSQ status command
+ * @report: - all signal quality information returned by the command
* (if NULL then the output of the command is ignored)
*
* Function returns 0 on success and negative error code on failure
@@ -862,9 +858,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status);
/**
* si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the
* device
- * @core - device to send the command to
- * @seekup - if set the direction of the search is 'up'
- * @wrap - if set seek wraps when hitting band limit
+ * @core: - device to send the command to
+ * @seekup: - if set the direction of the search is 'up'
+ * @wrap: - if set seek wraps when hitting band limit
*
* This function begins search for a valid station. The station is
* considered valid when 'FM_VALID_SNR_THRESHOLD' and
@@ -890,12 +886,14 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start);
/**
* si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the
* device
- * @core - device to send the command to
- * @status_only - if set the data is not removed from RDSFIFO,
+ * @core: - device to send the command to
+ * @status_only: - if set the data is not removed from RDSFIFO,
* RDSFIFOUSED is not decremented and data in all the
* rest RDS data contains the last valid info received
- * @mtfifo if set the command clears RDS receive FIFO
- * @intack if set the command clards the RDSINT bit.
+ * @mtfifo: if set the command clears RDS receive FIFO
+ * @intack: if set the command clears the RDSINT bit.
+ * @report: - all signal quality information returned by the command
+ * (if NULL then the output of the command is ignored)
*
* Function returns 0 on success and negative error code on failure
*/
@@ -1036,9 +1034,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status);
/**
* si476x_cmd_am_seek_start - send 'FM_SEEK_START' command to the
* device
- * @core - device to send the command to
- * @seekup - if set the direction of the search is 'up'
- * @wrap - if set seek wraps when hitting band limit
+ * @core: - device to send the command to
+ * @seekup: - if set the direction of the search is 'up'
+ * @wrap: - if set seek wraps when hitting band limit
*
* This function begins search for a valid station. The station is
* considered valid when 'FM_VALID_SNR_THRESHOLD' and
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index c8d28b844def..c1d7b845244e 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -534,6 +534,11 @@ static irqreturn_t si476x_core_interrupt(int irq, void *dev)
/**
* si476x_firmware_version_to_revision()
* @core: Core device structure
+ * @func: Selects the boot function of the device:
+ * *_BOOTLOADER - Boot loader
+ * *_FM_RECEIVER - FM receiver
+ * *_AM_RECEIVER - AM receiver
+ * *_WB_RECEIVER - Weatherband receiver
* @major: Firmware major number
* @minor1: Firmware first minor number
* @minor2: Firmware second minor number
@@ -583,7 +588,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core,
goto unknown_revision;
}
case SI476X_FUNC_BOOTLOADER:
- default: /* FALLTHROUG */
+ default: /* FALLTHROUGH */
BUG();
return -1;
}
diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c
index 76eedfae8553..3ad35bf0c015 100644
--- a/drivers/mfd/sky81452.c
+++ b/drivers/mfd/sky81452.c
@@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client,
memset(cells, 0, sizeof(cells));
cells[0].name = "sky81452-backlight";
cells[0].of_compatible = "skyworks,sky81452-backlight";
- cells[0].platform_data = pdata->bl_pdata;
- cells[0].pdata_size = sizeof(*pdata->bl_pdata);
cells[1].name = "sky81452-regulator";
cells[1].platform_data = pdata->regulator_init_data;
cells[1].pdata_size = sizeof(*pdata->regulator_init_data);
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
deleted file mode 100644
index 57b792eb58fd..000000000000
--- a/drivers/mfd/smsc-ece1099.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * TI SMSC MFD Driver
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; GPL v2.
- *
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/irq.h>
-#include <linux/regmap.h>
-#include <linux/err.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/smsc.h>
-#include <linux/of_platform.h>
-
-static const struct regmap_config smsc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = SMSC_VEN_ID_H,
- .cache_type = REGCACHE_RBTREE,
-};
-
-static int smsc_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct smsc *smsc;
- int devid, rev, venid_l, venid_h;
- int ret;
-
- smsc = devm_kzalloc(&i2c->dev, sizeof(*smsc), GFP_KERNEL);
- if (!smsc)
- return -ENOMEM;
-
- smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config);
- if (IS_ERR(smsc->regmap))
- return PTR_ERR(smsc->regmap);
-
- i2c_set_clientdata(i2c, smsc);
- smsc->dev = &i2c->dev;
-
-#ifdef CONFIG_OF
- of_property_read_u32(i2c->dev.of_node, "clock", &smsc->clk);
-#endif
-
- regmap_read(smsc->regmap, SMSC_DEV_ID, &devid);
- regmap_read(smsc->regmap, SMSC_DEV_REV, &rev);
- regmap_read(smsc->regmap, SMSC_VEN_ID_L, &venid_l);
- regmap_read(smsc->regmap, SMSC_VEN_ID_H, &venid_h);
-
- dev_info(&i2c->dev, "SMSCxxx devid: %02x rev: %02x venid: %02x\n",
- devid, rev, (venid_h << 8) | venid_l);
-
- ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk);
- if (ret)
- return ret;
-
-#ifdef CONFIG_OF
- if (i2c->dev.of_node)
- ret = devm_of_platform_populate(&i2c->dev);
-#endif
-
- return ret;
-}
-
-static const struct i2c_device_id smsc_i2c_id[] = {
- { "smscece1099", 0},
- {},
-};
-
-static struct i2c_driver smsc_i2c_driver = {
- .driver = {
- .name = "smsc",
- },
- .probe = smsc_i2c_probe,
- .id_table = smsc_i2c_id,
-};
-builtin_i2c_driver(smsc_i2c_driver);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 33336cde4724..f8a8b918c60d 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -7,7 +7,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/sc27xx-pmic.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <uapi/linux/usb/charger.h>
@@ -93,73 +95,6 @@ enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev)
}
EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type);
-static const struct mfd_cell sprd_pmic_devs[] = {
- {
- .name = "sc27xx-wdt",
- .of_compatible = "sprd,sc2731-wdt",
- }, {
- .name = "sc27xx-rtc",
- .of_compatible = "sprd,sc2731-rtc",
- }, {
- .name = "sc27xx-charger",
- .of_compatible = "sprd,sc2731-charger",
- }, {
- .name = "sc27xx-chg-timer",
- .of_compatible = "sprd,sc2731-chg-timer",
- }, {
- .name = "sc27xx-fast-chg",
- .of_compatible = "sprd,sc2731-fast-chg",
- }, {
- .name = "sc27xx-chg-wdt",
- .of_compatible = "sprd,sc2731-chg-wdt",
- }, {
- .name = "sc27xx-typec",
- .of_compatible = "sprd,sc2731-typec",
- }, {
- .name = "sc27xx-flash",
- .of_compatible = "sprd,sc2731-flash",
- }, {
- .name = "sc27xx-eic",
- .of_compatible = "sprd,sc2731-eic",
- }, {
- .name = "sc27xx-efuse",
- .of_compatible = "sprd,sc2731-efuse",
- }, {
- .name = "sc27xx-thermal",
- .of_compatible = "sprd,sc2731-thermal",
- }, {
- .name = "sc27xx-adc",
- .of_compatible = "sprd,sc2731-adc",
- }, {
- .name = "sc27xx-audio-codec",
- .of_compatible = "sprd,sc2731-audio-codec",
- }, {
- .name = "sc27xx-regulator",
- .of_compatible = "sprd,sc2731-regulator",
- }, {
- .name = "sc27xx-vibrator",
- .of_compatible = "sprd,sc2731-vibrator",
- }, {
- .name = "sc27xx-keypad-led",
- .of_compatible = "sprd,sc2731-keypad-led",
- }, {
- .name = "sc27xx-bltc",
- .of_compatible = "sprd,sc2731-bltc",
- }, {
- .name = "sc27xx-fgu",
- .of_compatible = "sprd,sc2731-fgu",
- }, {
- .name = "sc27xx-7sreset",
- .of_compatible = "sprd,sc2731-7sreset",
- }, {
- .name = "sc27xx-poweroff",
- .of_compatible = "sprd,sc2731-poweroff",
- }, {
- .name = "sc27xx-syscon",
- .of_compatible = "sprd,sc2731-syscon",
- },
-};
-
static int sprd_pmic_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
@@ -250,10 +185,8 @@ static int sprd_pmic_probe(struct spi_device *spi)
return -ENOMEM;
ddata->irq_chip.irqs = ddata->irqs;
- for (i = 0; i < pdata->num_irqs; i++) {
- ddata->irqs[i].reg_offset = i / pdata->num_irqs;
- ddata->irqs[i].mask = BIT(i % pdata->num_irqs);
- }
+ for (i = 0; i < pdata->num_irqs; i++)
+ ddata->irqs[i].mask = BIT(i);
ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
@@ -263,12 +196,9 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
- ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO,
- sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs),
- NULL, 0,
- regmap_irq_get_domain(ddata->irq_data));
+ ret = devm_of_platform_populate(&spi->dev);
if (ret) {
- dev_err(&spi->dev, "Failed to register device %d\n", ret);
+ dev_err(&spi->dev, "Failed to populate sub-devices %d\n", ret);
return ret;
}
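
The sprd-sc27xx conversion drops the static mfd_cell table in favour of devm_of_platform_populate(), which creates child platform devices directly from the DT child nodes and tears them down when the parent unbinds; the children are then matched through their own of_match tables. A minimal probe sketch of that approach for a hypothetical PMIC:

#include <linux/of_platform.h>
#include <linux/spi/spi.h>

static int example_pmic_probe(struct spi_device *spi)
{
	int ret;

	/* ... regmap and IRQ chip setup for the parent would go here ... */

	/* Instantiate the children described under the parent's DT node. */
	ret = devm_of_platform_populate(&spi->dev);
	if (ret)
		dev_err(&spi->dev, "Failed to populate sub-devices %d\n", ret);

	return ret;
}
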
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index a00f99f36559..746e51a17cc8 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -17,6 +17,7 @@ static const struct regmap_config stm32_lptimer_regmap_cfg = {
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = STM32_LPTIM_MAX_REGISTER,
+ .fast_io = true,
};
static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 3a97816d0cba..df5cebb372a5 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -95,18 +95,20 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
break;
default:
pr_err("Failed to retrieve valid hwlock: %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto err_regmap;
}
}
- syscon_config.name = of_node_full_name(np);
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
+ (u64)res.start);
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
regmap = regmap_init_mmio(NULL, base, &syscon_config);
+ kfree(syscon_config.name);
if (IS_ERR(regmap)) {
pr_err("regmap init failed\n");
ret = PTR_ERR(regmap);
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 67c9995bb1aa..7882a37ffc35 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -18,7 +18,7 @@
#include <linux/mfd/tc3589x.h>
#include <linux/err.h>
-/**
+/*
* enum tc3589x_version - indicates the TC3589x version
*/
enum tc3589x_version {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 926c289cb040..0e6e25308190 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -1,7 +1,7 @@
/*
* TI Touch Screen / ADC MFD driver
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 65fcc58c02da..7e7dbee58ca9 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -404,7 +404,6 @@ static void tps65010_work(struct work_struct *work)
tps65010_interrupt(tps);
if (test_and_clear_bit(FLAG_VBUS_CHANGED, &tps->flags)) {
- int status;
u8 chgconfig, tmp;
chgconfig = i2c_smbus_read_byte_data(tps->client,
@@ -415,8 +414,8 @@ static void tps65010_work(struct work_struct *work)
else if (tps->vbus >= 100)
chgconfig |= TPS_VBUS_CHARGING;
- status = i2c_smbus_write_byte_data(tps->client,
- TPS_CHGCONFIG, chgconfig);
+ i2c_smbus_write_byte_data(tps->client,
+ TPS_CHGCONFIG, chgconfig);
/* vbus update fails unless VBUS is connected! */
tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG);
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 43119a6867fe..341466ef20cc 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 7566ce4457a0..2d9c282ec917 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -3,7 +3,7 @@
*
* TPS65217 chip family multi-function driver
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -205,7 +205,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_read);
/**
* tps65217_reg_write: Write a single tps65217 register.
*
- * @tps65217: Device to write to.
+ * @tps: Device to write to.
* @reg: Register to write to.
* @val: Value to write.
* @level: Password protected level
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
/**
* tps65217_update_bits: Modify bits w.r.t mask, val and level.
*
- * @tps65217: Device to write to.
+ * @tps: Device to write to.
* @reg: Register to read-write to.
* @mask: Mask.
* @val: Value to write.
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index a62ea4cb8be7..167e9fc308ef 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -1,7 +1,7 @@
/*
* Driver for TPS65218 Integrated power management chipsets
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2 as
@@ -48,7 +48,7 @@ static const struct mfd_cell tps65218_cells[] = {
/**
* tps65218_reg_write: Write a single tps65218 register.
*
- * @tps65218: Device to write to.
+ * @tps: Device to write to.
* @reg: Register to write to.
* @val: Value to write.
* @level: Password protected level
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(tps65218_reg_write);
/**
* tps65218_update_bits: Modify bits w.r.t mask, val and level.
*
- * @tps65218: Device to write to.
+ * @tps: Device to write to.
* @reg: Register to read-write to.
* @mask: Mask.
* @val: Value to write.
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index c8aadd39324e..c36597797ddd 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -309,18 +309,19 @@ static const struct irq_domain_ops tps6586x_domain_ops = {
static irqreturn_t tps6586x_irq(int irq, void *data)
{
struct tps6586x *tps6586x = data;
- u32 acks;
+ uint32_t acks;
+ __le32 val;
int ret = 0;
ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1,
- sizeof(acks), (uint8_t *)&acks);
+ sizeof(acks), (uint8_t *)&val);
if (ret < 0) {
dev_err(tps6586x->dev, "failed to read interrupt status\n");
return IRQ_NONE;
}
- acks = le32_to_cpu(acks);
+ acks = le32_to_cpu(val);
while (acks) {
int i = __ffs(acks);
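
The tps6586x and twl4030 hunks cure sparse endianness warnings: the bytes read back from the chip are little-endian, so they are first landed in a __le32 and converted exactly once with le32_to_cpu() before being used as a host-order bitmask. The idiom in isolation, with a stand-in read helper:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static u32 example_read_le32(const u8 *raw)
{
	__le32 val;

	/* The device supplies these four bytes in little-endian order. */
	memcpy(&val, raw, sizeof(val));

	/* Convert once, then operate on a host-order value. */
	return le32_to_cpu(val);
}
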
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index f33567bc428d..b55b1d5d6955 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -1,7 +1,7 @@
/*
* Core functions for TI TPS65912x PMICs
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 785d19f6f7c9..f7c22ea7b36c 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -1,7 +1,7 @@
/*
* I2C access driver for TI TPS65912x PMICs
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index f78be039e463..21a8d6ac5c4a 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -1,7 +1,7 @@
/*
* SPI access driver for TI TPS65912x PMICs
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 910a304b397c..ab417438d1fa 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -477,7 +477,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
if (agent->imr_change_pending) {
union {
- u32 word;
+ __le32 word;
u8 bytes[4];
} imr;
@@ -561,7 +561,7 @@ static inline int sih_read_isr(const struct sih *sih)
int status;
union {
u8 bytes[4];
- u32 word;
+ __le32 word;
} isr;
/* FIXME need retry-on-error ... */
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 02f879b23d9f..b0344e5353e4 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -114,6 +114,8 @@ static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
* The WM831x has a user key preventing writes to particularly
* critical registers. This function locks those registers,
* allowing writes to them.
+ *
+ * @wm831x: pointer to local driver data structure
*/
void wm831x_reg_lock(struct wm831x *wm831x)
{
@@ -140,6 +142,8 @@ EXPORT_SYMBOL_GPL(wm831x_reg_lock);
* The WM831x has a user key preventing writes to particularly
* critical registers. This function locks those registers,
* preventing spurious writes.
+ *
+ * @wm831x: pointer to local driver data structure
*/
int wm831x_reg_unlock(struct wm831x *wm831x)
{
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 42b16503e6cd..fbc77b218215 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -131,6 +131,8 @@ EXPORT_SYMBOL_GPL(wm8350_block_write);
* The WM8350 has a hardware lock which can be used to prevent writes to
* some registers (generally those which can cause particularly serious
* problems if misused). This function enables that lock.
+ *
+ * @wm8350: pointer to local driver data structure
*/
int wm8350_reg_lock(struct wm8350 *wm8350)
{
@@ -160,6 +162,8 @@ EXPORT_SYMBOL_GPL(wm8350_reg_lock);
* problems if misused). This function disables that lock so updates
* can be performed. For maximum safety this should be done only when
* required.
+ *
+ * @wm8350: pointer to local driver data structure
*/
int wm8350_reg_unlock(struct wm8350 *wm8350)
{
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 3055d6f47afc..0fe32a05421b 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -108,6 +108,8 @@ static const struct regmap_config wm8400_regmap_config = {
/**
* wm8400_reset_codec_reg_cache - Reset cached codec registers to
* their default values.
+ *
+ * @wm8400: pointer to local driver data structure
*/
void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
{
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index cde9a2fc1325..ed8d38b09925 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -90,10 +90,10 @@ static int at25_ee_read(void *priv, unsigned int offset,
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
- /* fall through */
+ fallthrough;
case 2:
*cp++ = offset >> 8;
- /* fall through */
+ fallthrough;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
@@ -178,10 +178,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
- /* fall through */
+ fallthrough;
case 2:
*cp++ = offset >> 8;
- /* fall through */
+ fallthrough;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
@@ -278,7 +278,7 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
switch (val) {
case 9:
chip->flags |= EE_INSTR_BIT3_IS_ADDR;
- /* fall through */
+ fallthrough;
case 8:
chip->flags |= EE_ADDR1;
break;
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 7c38c4f7f9c0..a8004911c977 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/genalloc.h>
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
@@ -300,7 +301,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
struct hl_device *hdev = hpriv->hdev;
struct hl_cb *cb;
phys_addr_t address;
- u32 handle;
+ u32 handle, user_cb_size;
int rc;
handle = vma->vm_pgoff;
@@ -314,7 +315,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
}
/* Validation check */
- if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
+ user_cb_size = vma->vm_end - vma->vm_start;
+ if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
dev_err(hdev->dev,
"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
vma->vm_end - vma->vm_start, cb->size);
@@ -322,6 +324,16 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
goto put_cb;
}
+ if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
+ user_cb_size)) {
+ dev_err(hdev->dev,
+ "user pointer is invalid - 0x%lx\n",
+ vma->vm_start);
+
+ rc = -EINVAL;
+ goto put_cb;
+ }
+
spin_lock(&cb->lock);
if (cb->mmap) {
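
The command_buffer hunk validates the user-supplied mmap range before the command buffer is mapped: the VMA must match the page-aligned object size and pass access_ok(). The checks in reduced form, with the surrounding handler elided:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

static int example_validate_vma(struct vm_area_struct *vma, u32 obj_size)
{
	u32 user_size = vma->vm_end - vma->vm_start;

	/* The mapping must be exactly as large as the page-aligned object. */
	if (user_size != ALIGN(obj_size, PAGE_SIZE))
		return -EINVAL;

	/* And it must lie within the caller's user address space. */
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start, user_size))
		return -EINVAL;

	return 0;
}
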
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index b9840e368eb5..2e3fcbc794db 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -808,6 +808,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
/* currently it is guaranteed to have only one chunk */
chunk = &cs_chunk_array[0];
+
+ if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
q_type = hw_queue_prop->type;
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index c50c6fc9e905..37701e4f9d5a 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -19,7 +19,7 @@
static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, u32 *val)
+ u8 i2c_reg, long *val)
{
struct armcp_packet pkt;
int rc;
@@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, (long *) val);
+ 0, val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -827,7 +827,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[32];
- u32 val;
+ long val;
ssize_t rc;
if (*ppos)
@@ -842,7 +842,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
return rc;
}
- sprintf(tmp_buf, "0x%02x\n", val);
+ sprintf(tmp_buf, "0x%02lx\n", val);
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index be16b75bdfdb..24b01cce0a38 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -288,7 +288,7 @@ static int device_early_init(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
snprintf(workq_name, 32, "hl-free-jobs-%u", i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
- if (hdev->cq_wq == NULL) {
+ if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
@@ -1069,7 +1069,7 @@ again:
goto out_err;
}
- hl_set_max_power(hdev, hdev->max_power);
+ hl_set_max_power(hdev);
} else {
rc = hdev->asic_funcs->soft_reset_late_init(hdev);
if (rc) {
@@ -1318,6 +1318,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto out_disabled;
}
+ /* Need to call this again because the max power might change,
+ * depending on card type for certain ASICs
+ */
+ hl_set_max_power(hdev);
+
/*
* hl_hwmon_init() must be called after device_late_init(), because only
* there we get the information from the device about which
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index f70302cdab1b..f52bc690dfc5 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -13,6 +13,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
+#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
*
@@ -48,6 +49,14 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+ if (fw_size > FW_FILE_MAX_SIZE) {
+ dev_err(hdev->dev,
+ "FW file size %zu exceeds maximum of %u bytes\n",
+ fw_size, FW_FILE_MAX_SIZE);
+ rc = -EINVAL;
+ goto out;
+ }
+
fw_data = (const u64 *) fw->data;
memcpy_toio(dst, fw_data, fw_size);
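
The firmware_if hunk caps the accepted image at FW_FILE_MAX_SIZE before it is copied to the device, so a corrupt or mismatched file cannot overrun the target region. A reduced sketch of the request/validate/release flow; the limit macro mirrors the hunk above, the rest is illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>

#define EXAMPLE_FW_MAX_SIZE	0x1400000	/* 20 MiB */

static int example_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, name, dev);
	if (rc)
		return rc;

	if (fw->size > EXAMPLE_FW_MAX_SIZE) {
		dev_err(dev, "FW file size %zu exceeds maximum of %u bytes\n",
			fw->size, EXAMPLE_FW_MAX_SIZE);
		rc = -EINVAL;
	}
	/* ... on success, copy fw->data to the device here ... */

	release_firmware(fw);
	return rc;
}
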
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 018d9d67e8e6..edbd627b29d2 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1462,6 +1462,8 @@ struct hl_device_idle_busy_ts {
* details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
+ * @card_type: Various ASICs have several card types. This indicates the card
+ * type of the current device.
* @cs_active_cnt: number of active command submissions on this device (active
* means already in H/W queues)
* @major: habanalabs kernel driver major.
@@ -1566,6 +1568,7 @@ struct hl_device {
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
+ enum armcp_card_types card_type;
int cs_active_cnt;
u32 major;
u32 high_pll;
@@ -1651,7 +1654,7 @@ struct hl_ioctl_desc {
*
* Return: true if the area is inside the valid range, false otherwise.
*/
-static inline bool hl_mem_area_inside_range(u64 address, u32 size,
+static inline bool hl_mem_area_inside_range(u64 address, u64 size,
u64 range_start_address, u64 range_end_address)
{
u64 end_address = address + size;
@@ -1858,7 +1861,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
-void hl_set_max_power(struct hl_device *hdev, u64 value);
+void hl_set_max_power(struct hl_device *hdev);
int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index dce9273e557a..5ff4688683fd 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -66,6 +66,11 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
total_size = num_pgs << page_shift;
+ if (!total_size) {
+ dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
+ return -EINVAL;
+ }
+
contiguous = args->flags & HL_MEM_CONTIGUOUS;
if (contiguous) {
@@ -93,7 +98,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
phys_pg_pack->contiguous = contiguous;
phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
- if (!phys_pg_pack->pages) {
+ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto pages_arr_err;
}
@@ -683,7 +688,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
GFP_KERNEL);
- if (!phys_pg_pack->pages) {
+ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
}
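
The memory.c hunks switch the allocation checks to ZERO_OR_NULL_PTR() because kvmalloc_array() with a zero element count returns ZERO_SIZE_PTR, which is not NULL yet must never be dereferenced; a plain NULL test would let a zero-sized array slip through. The check in isolation:

#include <linux/mm.h>
#include <linux/slab.h>

static u64 *example_alloc_page_array(size_t num_pgs)
{
	u64 *pages;

	pages = kvmalloc_array(num_pgs, sizeof(*pages), GFP_KERNEL);

	/* Catches both a failed allocation and a zero-sized request. */
	if (ZERO_OR_NULL_PTR(pages))
		return NULL;

	return pages;
}
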
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index edcc11d5eaf1..3fc0f497fab3 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -450,7 +450,7 @@ int hl_mmu_init(struct hl_device *hdev)
hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
prop->mmu_hop_table_size,
GFP_KERNEL | __GFP_ZERO);
- if (!hdev->mmu_shadow_hop0) {
+ if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
}
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 7bd3737571f3..2770f03b6cbb 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -227,7 +227,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
}
/* Point to the specified address */
- rc = hl_pci_iatu_write(hdev, offset + 0x14,
+ rc |= hl_pci_iatu_write(hdev, offset + 0x14,
lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x18,
upper_32_bits(pci_region->addr));
@@ -369,15 +369,17 @@ int hl_pci_init(struct hl_device *hdev)
rc = hdev->asic_funcs->init_iatu(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize iATU\n");
- goto disable_device;
+ goto unmap_pci_bars;
}
rc = hl_pci_set_dma_mask(hdev);
if (rc)
- goto disable_device;
+ goto unmap_pci_bars;
return 0;
+unmap_pci_bars:
+ hl_pci_bars_unmap(hdev);
disable_device:
pci_clear_master(pdev);
pci_disable_device(pdev);
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index b3cb0ac4721c..5ae484cc84cd 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -81,7 +81,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
return result;
}
-void hl_set_max_power(struct hl_device *hdev, u64 value)
+void hl_set_max_power(struct hl_device *hdev)
{
struct armcp_packet pkt;
int rc;
@@ -90,7 +90,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.value = cpu_to_le64(value);
+ pkt.value = cpu_to_le64(hdev->max_power);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
@@ -316,7 +316,7 @@ static ssize_t max_power_store(struct device *dev,
}
hdev->max_power = value;
- hl_set_max_power(hdev, value);
+ hl_set_max_power(hdev);
out:
return count;
@@ -422,6 +422,7 @@ int hl_sysfs_init(struct hl_device *hdev)
hdev->pm_mng_profile = PM_AUTO;
else
hdev->pm_mng_profile = PM_MANUAL;
+
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 00a0a7238d81..4009b7df4caf 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -154,6 +154,29 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
+static inline bool validate_packet_id(enum packet_id id)
+{
+ switch (id) {
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_CP_DMA:
+ case PACKET_REPEAT:
+ case PACKET_MSG_PROT:
+ case PACKET_FENCE:
+ case PACKET_LIN_DMA:
+ case PACKET_NOP:
+ case PACKET_STOP:
+ case PACKET_ARB_POINT:
+ case PACKET_WAIT:
+ case PACKET_LOAD_AND_EXE:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const char * const
gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
"tpc_address_exceed_slm",
@@ -433,7 +456,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->num_of_events = GAUDI_EVENT_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
- prop->max_power_default = MAX_POWER_DEFAULT;
+ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
@@ -2485,6 +2508,7 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
+ bool enable;
int i;
/* In case we are during debug session, don't enable the clock gate
@@ -2494,46 +2518,43 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
return;
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(gaudi_dma_assignment[i]))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i])));
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
- QMAN_UPPER_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0);
}
for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(gaudi_dma_assignment[i]))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i])));
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
}
- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
- WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- }
+ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0)));
+ WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
+ WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
- WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- }
+ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2)));
+ WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
+ WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)));
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
- QMAN_CGM1_PWR_GATE_EN);
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
qman_offset += TPC_QMAN_OFFSET;
}
@@ -3772,6 +3793,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
@@ -3995,6 +4022,12 @@ static int gaudi_patch_cb(struct hl_device *hdev,
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
@@ -5215,7 +5248,7 @@ static int gaudi_extract_ecc_info(struct hl_device *hdev,
*memory_wrapper_idx = 0xFF;
/* Iterate through memory wrappers, a single bit must be set */
- for (i = 0 ; i > num_mem_regs ; i++) {
+ for (i = 0 ; i < num_mem_regs ; i++) {
err_addr += i * 4;
err_word = RREG32(err_addr);
if (err_word) {
@@ -6022,6 +6055,15 @@ static int gaudi_armcp_info_get(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+ hdev->card_type = le32_to_cpu(hdev->asic_prop.armcp_info.card_type);
+
+ if (hdev->card_type == armcp_card_type_pci)
+ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+ else if (hdev->card_type == armcp_card_type_pmc)
+ prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+
+ hdev->max_power = prop->max_power_default;
+
return 0;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 5dc99f6f0296..82137c3f3e2e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -41,7 +41,8 @@
#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
-#define MAX_POWER_DEFAULT 200000 /* 200W */
+#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */
+#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */
#define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 5673ee49819e..881531d4d9da 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -527,7 +527,7 @@ static int gaudi_config_etf(struct hl_device *hdev,
}
static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
- u32 size, bool *is_host)
+ u64 size, bool *is_host)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -539,6 +539,12 @@ static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
return false;
}
+ if (addr > (addr + size)) {
+ dev_err(hdev->dev,
+ "ETR buffer size %llu overflow\n", size);
+ return false;
+ }
+
/* PMMU and HPMMU addresses are equal, check only one of them */
if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
hl_mem_area_inside_range(addr, size,
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 85030759b2af..33cd2ae653d2 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -139,6 +139,25 @@ static u16 goya_packet_sizes[MAX_PACKET_ID] = {
[PACKET_STOP] = sizeof(struct packet_stop)
};
+static inline bool validate_packet_id(enum packet_id id)
+{
+ switch (id) {
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_CP_DMA:
+ case PACKET_MSG_PROT:
+ case PACKET_FENCE:
+ case PACKET_LIN_DMA:
+ case PACKET_NOP:
+ case PACKET_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
@@ -3455,6 +3474,12 @@ static int goya_validate_cb(struct hl_device *hdev,
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
@@ -3690,6 +3715,12 @@ static int goya_patch_cb(struct hl_device *hdev,
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index b03912483de0..4027a6a334d7 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -362,11 +362,17 @@ static int goya_config_etf(struct hl_device *hdev,
}
static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
- u32 size)
+ u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 range_start, range_end;
+ if (addr > (addr + size)) {
+ dev_err(hdev->dev,
+ "ETR buffer size %llu overflow\n", size);
+ return false;
+ }
+
if (hdev->mmu_enable) {
range_start = prop->dmmu.start_addr;
range_end = prop->dmmu.end_addr;
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index d1d3e025ca0e..9ae9669e46ea 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -546,38 +546,46 @@ static int mei_hdcp_verify_mprime(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct wired_cmd_repeater_auth_stream_req_in
- verify_mprime_in = { { 0 } };
+ struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out
verify_mprime_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
+ size_t cmd_size;
if (!dev || !stream_ready || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
- verify_mprime_in.header.api_version = HDCP_API_VERSION;
- verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
- verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
- verify_mprime_in.header.buffer_len =
+ cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ if (cmd_size == SIZE_MAX)
+ return -EINVAL;
+
+ verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
+ if (!verify_mprime_in)
+ return -ENOMEM;
+
+ verify_mprime_in->header.api_version = HDCP_API_VERSION;
+ verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.buffer_len =
WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
- verify_mprime_in.port.integrated_port_type = data->port_type;
- verify_mprime_in.port.physical_port = (u8)data->fw_ddi;
- verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc;
+ verify_mprime_in->port.integrated_port_type = data->port_type;
+ verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc;
+
+ memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
+ drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
- memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
- HDCP_2_2_MPRIME_LEN);
- drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
- memcpy(verify_mprime_in.streams, data->streams,
+ memcpy(verify_mprime_in->streams, data->streams,
array_size(data->k, sizeof(*data->streams)));
- verify_mprime_in.k = cpu_to_be16(data->k);
+ verify_mprime_in->k = cpu_to_be16(data->k);
- byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in,
- sizeof(verify_mprime_in));
+ byte = mei_cldev_send(cldev, (u8 *)verify_mprime_in, cmd_size);
+ kfree(verify_mprime_in);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
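The mei_hdcp change swaps a fixed on-stack struct for a heap buffer sized with struct_size(), which accounts for the flexible streams[] array and saturates to SIZE_MAX when data->k is large enough to overflow, so the explicit SIZE_MAX test rejects the request instead of under-allocating. A minimal sketch of the same pattern in plain C, with hypothetical types and a hand-rolled overflow check standing in for the kernel's struct_size() helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stream { uint32_t id; };

struct msg {
	uint16_t k;              /* number of trailing streams */
	struct stream streams[]; /* flexible array member */
};

/* Overflow-checked analogue of struct_size(): returns SIZE_MAX if
 * header + k * element would not fit in size_t. */
static size_t msg_size(size_t k)
{
	if (k > (SIZE_MAX - sizeof(struct msg)) / sizeof(struct stream))
		return SIZE_MAX;
	return sizeof(struct msg) + k * sizeof(struct stream);
}

int main(void)
{
	size_t k = 4, sz = msg_size(k);
	struct msg *m;

	if (sz == SIZE_MAX)
		return 1;          /* reject instead of under-allocating */

	m = calloc(1, sz);         /* kzalloc() analogue */
	if (!m)
		return 1;

	m->k = (uint16_t)k;
	memset(m->streams, 0, k * sizeof(struct stream));
	printf("allocated %zu bytes for %zu streams\n", sz, k);
	free(m);
	return 0;
}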
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 9cc6b2a6cf22..304d6c833712 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -178,7 +178,7 @@ int scif_close(scif_epd_t epd)
case SCIFEP_ZOMBIE:
dev_err(scif_info.mdev.this_device,
"SCIFAPI close: zombie state unexpected\n");
- /* fall through */
+ fallthrough;
case SCIFEP_DISCONNECTED:
spin_unlock(&ep->lock);
scif_unregister_all_windows(epd);
@@ -645,7 +645,7 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
ep->port.port = err;
ep->port.node = scif_info.nodeid;
ep->conn_async_state = ASYNC_CONN_IDLE;
- /* Fall through */
+ fallthrough;
case SCIFEP_BOUND:
/*
* If a non-blocking connect has been already initiated
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index de8f61efaef5..2da3b474f486 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -657,7 +657,7 @@ int scif_unregister_window(struct scif_window *window)
window->unreg_state = OP_IN_PROGRESS;
send_msg = true;
}
- /* fall through */
+ fallthrough;
case OP_IN_PROGRESS:
{
scif_get_window(window, 1);
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index f6e600bfac5d..0ea923fe6371 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -622,7 +622,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
- /* fall through */
+ fallthrough;
default:
BUG();
}
@@ -780,7 +780,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
- /* fall through */
+ fallthrough;
default:
BUG();
}
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index d5e097cd556d..8a495dc82f16 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -1173,7 +1173,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
if (!xpc_kdebug_ignore)
break;
- /* fall through */
+ fallthrough;
case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER:
xpc_arch_ops.offline_heartbeat();
@@ -1184,7 +1184,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
if (!xpc_kdebug_ignore)
break;
- /* fall through */
+ fallthrough;
case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE:
xpc_arch_ops.online_heartbeat();
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 21a04bc97d40..099a53bdbb7d 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -441,10 +441,10 @@ xpc_discovery(void)
switch (region_size) {
case 128:
max_regions *= 2;
- /* fall through */
+ fallthrough;
case 64:
max_regions *= 2;
- /* fall through */
+ fallthrough;
case 32:
max_regions *= 2;
region_size = 16;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 98c60f11b76b..7791bde81a36 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -574,7 +574,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
xpc_wakeup_channel_mgr(part);
}
- /* fall through */
+ fallthrough;
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ce43f7573d80..c8fae6611b73 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -191,7 +191,7 @@ int mmc_of_parse(struct mmc_host *host)
switch (bus_width) {
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
- /* fall through - Hosts capable of 8-bit can also do 4 bits */
+ fallthrough; /* Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 300901415aa2..3fc3bbea8536 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2418,7 +2418,7 @@ static void atmci_get_cap(struct atmel_mci *host)
case 0x600:
case 0x500:
host->caps.has_odd_clk_div = 1;
- /* Fall through */
+ fallthrough;
case 0x400:
case 0x300:
host->caps.has_dma_conf_reg = 1;
@@ -2426,16 +2426,16 @@ static void atmci_get_cap(struct atmel_mci *host)
host->caps.has_cfg_reg = 1;
host->caps.has_cstor_reg = 1;
host->caps.has_highspeed = 1;
- /* Fall through */
+ fallthrough;
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
host->caps.need_notbusy_for_read_ops = 1;
- /* Fall through */
+ fallthrough;
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
- /* Fall through */
+ fallthrough;
case 0x0:
break;
default:
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index f01fecd75833..e50a08bce7ef 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -300,7 +300,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
* then it's harmless for us to allow it.
*/
cmd_reg |= MMCCMD_BSYEXP;
- /* FALLTHROUGH */
+ fallthrough;
case MMC_RSP_R1: /* 48 bits, CRC */
cmd_reg |= MMCCMD_RSPFMT_R1456;
break;
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 50977ff18074..db1a84b2ba61 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -238,7 +238,7 @@ static void dw_mci_hs_set_timing(struct dw_mci *host, int timing,
if (smpl_phase >= USE_DLY_MIN_SMPL &&
smpl_phase <= USE_DLY_MAX_SMPL)
use_smpl_dly = 1;
- /* fallthrough */
+ fallthrough;
case MMC_TIMING_UHS_SDR50:
if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL &&
smpl_phase <= ENABLE_SHIFT_MAX_SMPL)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 35ae5737c622..0fba940544ca 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2030,7 +2030,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
prev_state = state = STATE_SENDING_DATA;
- /* fall through */
+ fallthrough;
case STATE_SENDING_DATA:
/*
@@ -2088,7 +2088,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
prev_state = state = STATE_DATA_BUSY;
- /* fall through */
+ fallthrough;
case STATE_DATA_BUSY:
if (!dw_mci_clear_pending_data_complete(host)) {
@@ -2141,7 +2141,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
*/
prev_state = state = STATE_SENDING_STOP;
- /* fall through */
+ fallthrough;
case STATE_SENDING_STOP:
if (!dw_mci_clear_pending_cmd_complete(host))
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 447552ac25c4..81d71010b474 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -739,7 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
jz_mmc_prepare_data_transfer(host);
- /* fall through */
+ fallthrough;
case JZ4740_MMC_STATE_TRANSFER_DATA:
if (host->use_dma) {
@@ -774,7 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
- /* fall through */
+ fallthrough;
case JZ4740_MMC_STATE_SEND_STOP:
if (!req->stop)
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 9b2cf7afc246..703d5834f9a5 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -294,7 +294,7 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
vdd = 0;
- /* fall through */
+ fallthrough;
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
host->error = mmc_regulator_set_ocr(mmc,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4e2583f69a63..b0c27944db7f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
@@ -419,6 +420,7 @@ struct msdc_host {
struct pinctrl_state *pins_uhs;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
@@ -1592,6 +1594,12 @@ static void msdc_init_hw(struct msdc_host *host)
u32 val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ if (host->reset) {
+ reset_control_assert(host->reset);
+ usleep_range(10, 50);
+ reset_control_deassert(host->reset);
+ }
+
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -2390,6 +2398,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (IS_ERR(host->src_clk_cg))
host->src_clk_cg = NULL;
+ host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "hrst");
+ if (IS_ERR(host->reset))
+ return PTR_ERR(host->reset);
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
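The reset handle above is requested with devm_reset_control_get_optional_exclusive(), and the optional variant returns NULL (not an error) when no "hrst" reset is described for the device; the reset_control_*() helpers also treat a NULL handle as a no-op, so the probe only fails on a real error such as -EPROBE_DEFER, and the if (host->reset) guard in msdc_init_hw() is largely defensive. A hedged consumer-side sketch of that pattern, with a made-up function name and not a complete driver:

/* Sketch only: a hypothetical probe-path helper following the pattern
 * added to mtk-sd above. */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_pulse_optional_reset(struct device *dev)
{
	struct reset_control *rst;

	/* NULL (not an error) when no "hrst" reset is described. */
	rst = devm_reset_control_get_optional_exclusive(dev, "hrst");
	if (IS_ERR(rst))
		return PTR_ERR(rst);    /* e.g. -EPROBE_DEFER */

	/* The reset_control_*() helpers are no-ops for a NULL handle. */
	reset_control_assert(rst);
	usleep_range(10, 50);
	reset_control_deassert(rst);

	return 0;
}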
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 15e21894bd44..904f5237d8f7 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -685,7 +685,7 @@ static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
case HOST_MODE:
if (host->pdata->flags & TMIO_MMC_HAVE_CBSY)
bit = TMIO_STAT_CMD_BUSY;
- /* fallthrough */
+ fallthrough;
case CTL_SD_CARD_CLK_CTL:
return renesas_sdhi_wait_idle(host, bit);
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 48ecbd0b180d..962f074ca174 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -535,6 +535,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+struct amd_sdhci_host {
+ bool tuned_clock;
+ bool dll_enabled;
+};
+
/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
@@ -555,26 +560,66 @@ static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
}
/*
- * For AMD Platform it is required to disable the tuning
- * bit first controller to bring to HS Mode from HS200
- * mode, later enable to tune to HS400 mode.
+ * The initialization sequence for HS400 is:
+ * HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The re-tuning sequence is:
+ * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400
+ * mode. If we switch to a different mode, we need to disable the tuned clock.
+ * If we have previously performed tuning and switch back to HS200 or
+ * HS400, we can re-enable the tuned clock.
+ *
*/
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
unsigned int old_timing = host->timing;
+ u16 val;
sdhci_set_ios(mmc, ios);
- if (old_timing == MMC_TIMING_MMC_HS200 &&
- ios->timing == MMC_TIMING_MMC_HS)
- sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
- if (old_timing != MMC_TIMING_MMC_HS400 &&
- ios->timing == MMC_TIMING_MMC_HS400) {
- sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
- sdhci_acpi_amd_hs400_dll(host);
+
+ if (old_timing != host->timing && amd_host->tuned_clock) {
+ if (host->timing == MMC_TIMING_MMC_HS400 ||
+ host->timing == MMC_TIMING_MMC_HS200) {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ } else {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ }
+
+ /* DLL is only required for HS400 */
+ if (host->timing == MMC_TIMING_MMC_HS400 &&
+ !amd_host->dll_enabled) {
+ sdhci_acpi_amd_hs400_dll(host);
+ amd_host->dll_enabled = true;
+ }
}
}
+static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+ amd_host->tuned_clock = false;
+
+ err = sdhci_execute_tuning(mmc, opcode);
+
+ if (!err && !host->tuning_err)
+ amd_host->tuned_clock = true;
+
+ return err;
+}
+
static const struct sdhci_ops sdhci_acpi_ops_amd = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -602,6 +647,7 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
+ host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
return 0;
}
@@ -613,6 +659,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ .priv_size = sizeof(struct amd_sdhci_host),
};
struct sdhci_acpi_uid_slot {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a76b4513fbec..d738907a622f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1556,7 +1556,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
"failed to request card-detect gpio!\n");
return err;
}
- /* fall through */
+ fallthrough;
case ESDHC_CD_CONTROLLER:
/* we have a working card_detect back */
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index bb6802448b2f..af413805bbf1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -232,6 +232,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc)
sdhci_dumpregs(mmc_priv(mmc));
}
+static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+ sdhci_reset(host, mask);
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -718,7 +726,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = {
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_cqhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 9194bb73e601..080ced1e63f0 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -609,7 +609,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
switch (pdata->max_width) {
case 8:
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
- /* Fall through */
+ fallthrough;
case 4:
host->mmc->caps |= MMC_CAP_4_BIT_DATA;
break;
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index a910cb461ed7..bafa2e41c8b6 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -470,7 +470,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
break;
default:
- /* fall-through */
+ fallthrough;
case MMC_SIGNAL_VOLTAGE_330:
ret = pinctrl_select_state(sprd_host->pinctrl,
sprd_host->pins_default);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0a3f9d024f2a..13fbf70b5fde 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -110,6 +110,12 @@
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
+/*
+ * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for the
+ * Tegra SDMMC hardware data timeout.
+ */
+#define NVQUIRK_HAS_TMCLK BIT(10)
+
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
@@ -140,6 +146,7 @@ struct sdhci_tegra_autocal_offsets {
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
+ struct clk *tmclk;
bool ddr_signaling;
bool pad_calib_required;
bool pad_control_available;
@@ -1418,7 +1425,6 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
@@ -1434,7 +1440,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 106,
.max_tap_delay = 185,
};
@@ -1456,7 +1463,6 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
@@ -1473,6 +1479,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK |
NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
.min_tap_delay = 84,
.max_tap_delay = 136,
@@ -1485,7 +1492,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 96,
.max_tap_delay = 139,
};
@@ -1613,6 +1621,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_power_req;
}
+ /*
+ * Tegra210 has a separate SDMMC_LEGACY_TM clock that can be used as the
+ * host timeout clock; SW can choose TMCLK or SDCLK for the hardware data
+ * timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
+ * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
+ *
+ * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
+ * 12 MHz TMCLK that is advertised in the host capability register.
+ * With a 12 MHz TMCLK, the maximum achievable data timeout period is
+ * 11 s, which is better than using SDCLK for the data timeout.
+ *
+ * So TMCLK is set to 12 MHz and kept enabled all the time on SoCs
+ * that support a separate TMCLK.
+ */
+
+ if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
+ clk = devm_clk_get(&pdev->dev, "tmclk");
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -EPROBE_DEFER)
+ goto err_power_req;
+
+ dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
+ clk = NULL;
+ }
+
+ clk_set_rate(clk, 12000000);
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "failed to enable tmclk: %d\n", rc);
+ goto err_power_req;
+ }
+
+ tegra_host->tmclk = clk;
+ }
+
clk = devm_clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
rc = PTR_ERR(clk);
@@ -1656,6 +1701,7 @@ err_add_host:
err_rst_get:
clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
+ clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
sdhci_pltfm_free(pdev);
@@ -1673,6 +1719,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index e6e9e286cc34..03ce57ef4585 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -527,7 +527,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host,
ret = true;
break;
}
- /* fall through */
+ fallthrough;
default:
reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
ret = false;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3ad394b40eb1..592a55a34b58 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2825,7 +2825,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
case MMC_TIMING_UHS_SDR50:
if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
goto out;
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 5987656e0474..fd8b72d3e02c 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -335,7 +335,7 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
break;
case MMC_RSP_R1B:
rc |= TIFM_MMCSD_RSP_BUSY;
- /* fall-through */
+ fallthrough;
case MMC_RSP_R1:
rc |= TIFM_MMCSD_RSP_R1;
break;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 369b8dee2e3d..7666c90054ae 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1343,7 +1343,7 @@ static int usdhi6_stop_cmd(struct usdhi6_host *host)
host->wait = USDHI6_WAIT_FOR_STOP;
return 0;
}
- /* fall through - Unsupported STOP command. */
+ fallthrough; /* Unsupported STOP command */
default:
dev_err(mmc_dev(host->mmc),
"unsupported stop CMD%d for CMD%d\n",
@@ -1691,7 +1691,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
switch (host->wait) {
default:
dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
- /* fall through - mrq can be NULL, but is impossible. */
+ fallthrough; /* mrq can be NULL, but is impossible */
case USDHI6_WAIT_FOR_CMD:
usdhi6_error_code(host);
if (mrq)
@@ -1713,7 +1713,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
host->offset, data->blocks, data->blksz, data->sg_len,
sg_dma_len(sg), sg->offset);
usdhi6_sg_unmap(host, true);
- /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */
+ fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */
case USDHI6_WAIT_FOR_DATA_END:
usdhi6_error_code(host);
data->error = -ETIMEDOUT;
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 83afc00e365a..28f55f9cf715 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -381,6 +381,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
+ if (ubi->fm_next_anchor) {
+ return_unused_peb(ubi, ubi->fm_next_anchor);
+ ubi->fm_next_anchor = NULL;
+ }
+
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 27636063ed1b..42cac572f82d 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1086,7 +1086,8 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+ if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ e->pnum < UBI_FM_MAX_START) {
/* Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 12466b06692c..22ed051eb1a4 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -93,7 +93,7 @@ static int adgs1408_probe(struct spi_device *spi)
mux->idle_state = idle_state;
break;
}
- /* fall through */
+ fallthrough;
default:
dev_err(dev, "invalid idle-state %d\n", idle_state);
return -EINVAL;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 18428e104445..1c6c27f35ac4 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
dev->irq = cops_irq(ioaddr, board);
if (dev->irq)
break;
- /* fall through - Once no IRQ found on this port. */
+ fallthrough; /* Once no IRQ found on this port */
case 1:
retval = -EINVAL;
goto err_out;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 14a5fb378145..98df38fe553c 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -363,13 +363,13 @@ static int __init arcrimi_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 3: /* Node ID */
node = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index cd27fdc1059b..f983c4ce6b07 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -197,22 +197,22 @@ static int __init com20020isa_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_info("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 6: /* Timeout */
timeout = ints[6];
- /* Fall through */
+ fallthrough;
case 5: /* CKP value */
clockp = ints[5];
- /* Fall through */
+ fallthrough;
case 4: /* Backplane flag */
backplane = ints[4];
- /* Fall through */
+ fallthrough;
case 3: /* Node ID */
node = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 186bbf87bc84..cf214b730671 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -363,10 +363,10 @@ static int __init com90io_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index bd75d06ad7df..3dc3d533cb19 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -693,13 +693,13 @@ static int __init com90xx_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 3: /* Mem address */
shmem = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 31e43a2197a3..aa001b16765a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -130,7 +130,7 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
/**
* __get_first_agg - get the first aggregator in the bond
- * @bond: the bond we're looking at
+ * @port: the port we're looking at
*
* Return the aggregator of the first slave in @bond, or %NULL if it can't be
* found.
@@ -1149,7 +1149,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
- /* Fall Through */
+ fallthrough;
case AD_RX_PORT_DISABLED:
port->sm_vars &= ~AD_PORT_MATCHED;
break;
@@ -1588,7 +1588,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
- /*FALLTHROUGH*/
+ fallthrough;
case BOND_AD_STABLE:
case BOND_AD_BANDWIDTH:
if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
@@ -1626,7 +1626,7 @@ static int agg_device_up(const struct aggregator *agg)
/**
* ad_agg_selection_logic - select an aggregation group for a team
- * @aggregator: the aggregator we're looking at
+ * @agg: the aggregator we're looking at
* @update_slave_arr: Does slave array need update?
*
* It is assumed that only one aggregator may be selected for a team.
@@ -1810,7 +1810,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
- * @aggregator: the aggregator we're looking at
+ * @port: the port we're looking at
* @lacp_fast: boolean. whether fast periodic should be used
*/
static void ad_initialize_port(struct port *port, int lacp_fast)
@@ -1967,6 +1967,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
/**
* bond_3ad_initiate_agg_selection - initate aggregator selection
* @bond: bonding struct
+ * @timeout: timeout value to set
*
* Set the aggregation selection timer, to initiate an agg selection in
* the very near future. Called during first initialization, and during
@@ -2259,7 +2260,7 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
/**
* bond_3ad_state_machine_handler - handle state machines timeout
- * @bond: bonding struct to work on
+ * @work: work context to fetch bonding struct to work on from
*
* The state machine handling concept in this module is to check every tick
* which state machine should operate any function. The execution order is
@@ -2500,7 +2501,7 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
/**
* bond_3ad_handle_link_change - handle a slave's link status change indication
* @slave: slave struct to work on
- * @status: whether the link is now up or down
+ * @link: whether the link is now up or down
*
* Handle reselection of aggregator (if needed) for this port.
*/
@@ -2551,7 +2552,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
/**
* bond_3ad_set_carrier - set link state for bonding master
- * @bond - bonding structure
+ * @bond: bonding structure
*
* if we have an active aggregator, we're up, if not, we're down.
* Presumes that we cannot have an active aggregator if there are
@@ -2664,7 +2665,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
/**
* bond_3ad_update_lacp_rate - change the lacp rate
- * @bond - bonding struct
+ * @bond: bonding struct
*
* When modify lacp_rate parameter via sysfs,
* update actor_oper_port_state of each port.
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 095ea51d1853..4e1b7deb724b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1206,8 +1206,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/**
* alb_set_mac_address
- * @bond:
- * @addr:
+ * @bond: bonding we're working on
+ * @addr: MAC address to set
*
* In TLB mode all slaves are configured to the bond's hw address, but set
* their dev_addr field to different addresses (based on their permanent hw
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5ad43aaf76e5..42ef25ec0af5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -322,6 +322,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
/**
* bond_vlan_rx_add_vid - Propagates adding an id to slaves
* @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
* @vid: vlan id being added
*/
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
@@ -355,6 +356,7 @@ unwind:
/**
* bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
* @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
* @vid: vlan id being removed
*/
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
@@ -948,7 +950,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
/**
* change_active_interface - change the active slave into the specified one
* @bond: our bonding struct
- * @new: the new slave to make the active one
+ * @new_active: the new slave to make the active one
*
* Set the new slave to the bond's settings and unset them on the old
* curr_active_slave.
@@ -2205,7 +2207,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = __bond_release_one(bond_dev, slave_dev, false, true);
- if (ret == 0 && !bond_has_slaves(bond)) {
+ if (ret == 0 && !bond_has_slaves(bond) &&
+ bond_dev->reg_state != NETREG_UNREGISTERING) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);
@@ -2271,7 +2274,7 @@ static int bond_miimon_inspect(struct bonding *bond)
"active " : "backup ") : "",
bond->params.downdelay * bond->params.miimon);
}
- /*FALLTHRU*/
+ fallthrough;
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
@@ -2307,7 +2310,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond->params.updelay *
bond->params.miimon);
}
- /*FALLTHRU*/
+ fallthrough;
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -2945,6 +2948,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (bond_time_in_interval(bond, last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
+ } else if (slave->link == BOND_LINK_BACK) {
+ bond_propose_link_state(slave, BOND_LINK_FAIL);
+ commit++;
}
continue;
}
@@ -3053,6 +3059,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
+ case BOND_LINK_FAIL:
+ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
+ BOND_SLAVE_NOTIFY_NOW);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
+
+ /* A slave has just been enslaved and has become
+ * the current active slave.
+ */
+ if (rtnl_dereference(bond->curr_active_slave))
+ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+ continue;
+
default:
slave_err(bond->dev, slave->dev,
"impossible: link_new_state %d on slave\n",
@@ -3103,8 +3122,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
-
bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && bond_slave_is_up(slave))
before = slave;
@@ -3305,7 +3322,7 @@ static int bond_slave_netdev_event(unsigned long event,
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
- /* Fallthrough */
+ fallthrough;
case NETDEV_DOWN:
/* Refresh slave-array if applicable!
* If the setup does not use miimon or arpmon (mode-specific!),
@@ -3743,7 +3760,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return -EINVAL;
mii->phy_id = 0;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG:
/* We do this again just in case we were called by SIOCGMIIREG
* instead of SIOCGMIIPHY.
@@ -4552,13 +4569,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
+{
+ if (speed == 0 || speed == SPEED_UNKNOWN)
+ speed = slave->speed;
+ else
+ speed = min(speed, slave->speed);
+
+ return speed;
+}
+
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- unsigned long speed = 0;
struct list_head *iter;
struct slave *slave;
+ u32 speed = 0;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
@@ -4570,8 +4597,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
- if (slave->speed != SPEED_UNKNOWN)
- speed += slave->speed;
+ if (slave->speed != SPEED_UNKNOWN) {
+ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
+ speed = bond_mode_bcast_speed(slave,
+ speed);
+ else
+ speed += slave->speed;
+ }
if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
cmd->base.duplex = slave->duplex;
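In broadcast mode every frame is transmitted on all slaves, so the bond can only sustain the rate of its slowest transmit-capable slave; bond_mode_bcast_speed() therefore folds slave speeds with min() while the other modes keep summing them. A tiny standalone illustration with made-up link speeds:

#include <stdio.h>

#define SPEED_UNKNOWN (-1)

/* Same shape as bond_mode_bcast_speed(): keep the minimum known speed. */
static int bcast_speed(int acc, int slave_speed)
{
	if (acc == 0 || acc == SPEED_UNKNOWN)
		return slave_speed;
	return slave_speed < acc ? slave_speed : acc;
}

int main(void)
{
	int slaves[] = { 1000, 100, 1000 };   /* hypothetical speeds, Mb/s */
	int sum = 0, bcast = 0;
	unsigned int i;

	for (i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++) {
		sum += slaves[i];
		bcast = bcast_speed(bcast, slaves[i]);
	}
	/* aggregating modes would report 2100, broadcast reports 100 */
	printf("sum=%d bcast=%d\n", sum, bcast);
	return 0;
}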
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 9df2007b5e56..38e9f80ed1ef 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -898,7 +898,7 @@ static void at91_irq_err_state(struct net_device *dev,
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
/*
* from: ERROR_ACTIVE, ERROR_WARNING
@@ -948,7 +948,7 @@ static void at91_irq_err_state(struct net_device *dev,
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
reg_ier = AT91_IRQ_ERRP;
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 6ad83a881039..9469d4421afe 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -659,7 +659,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
PCIEFD_REG_CAN_CLK_SEL);
- /* fall through */
+ fallthrough;
case CANFD_CLK_SEL_80MHZ:
priv->ucan.can.clock.freq = 80 * 1000 * 1000;
break;
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d7222ba46622..d7c2ec529b8f 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -150,7 +150,7 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->read_reg = sp_read_reg16;
priv->write_reg = sp_write_reg16;
break;
- case 1: /* fallthrough */
+ case 1:
default:
priv->read_reg = sp_read_reg8;
priv->write_reg = sp_write_reg8;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 91cdc0a2b1a7..b4a39f0449ba 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -153,7 +153,7 @@ static void slc_bump(struct slcan *sl)
switch (*cmd) {
case 'r':
cf.can_id = CAN_RTR_FLAG;
- /* fallthrough */
+ fallthrough;
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
@@ -163,7 +163,7 @@ static void slc_bump(struct slcan *sl)
break;
case 'R':
cf.can_id = CAN_RTR_FLAG;
- /* fallthrough */
+ fallthrough;
case 'T':
cf.can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 5009ff294941..d17608870f2d 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -864,7 +864,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF)
priv->can.can_stats.error_warning++;
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index d2539c95adb6..66d0198e7834 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b7766b715fd..d91df34e7fa8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -345,7 +345,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
default:
netdev_warn(netdev, "tx urb submitting failed err=%d\n",
err);
- /* fall through */
+ fallthrough;
case -ENOENT:
/* cable unplugged */
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 53cb2f72bdd0..1689ab387612 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -133,10 +133,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
switch (id) {
case PCAN_USBPRO_TXMSG8:
i += 4;
- /* fall through */
+ fallthrough;
case PCAN_USBPRO_TXMSG4:
i += 4;
- /* fall through */
+ fallthrough;
case PCAN_USBPRO_TXMSG0:
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 6500179c2ca2..e731db900ee0 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1061,7 +1061,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
- /* fallthrough */
+ fallthrough;
case SPEED_1000:
reg |= PORT_OVERRIDE_SPEED_1000M;
break;
@@ -1554,6 +1554,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
return ret;
switch (ret) {
+ case -ETIMEDOUT:
+ return ret;
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 629bf14128a2..5ae3d9783b68 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -170,7 +170,7 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
phylink_set(supported, 2500baseX_Full);
- /* fallthrough */
+ fallthrough;
case 1:
phylink_set(supported, 1000baseX_Full);
break;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index bafddb35f3a9..5ebff986a1ac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -566,7 +566,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
id_mode_dis = 1;
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
port_mode = EXT_GPHY;
break;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index dc999406ce86..3cb22d149813 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1083,7 +1083,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
interface = PHY_INTERFACE_MODE_GMII;
if (gbit)
break;
- /* fall through */
+ fallthrough;
case 0:
interface = PHY_INTERFACE_MODE_MII;
break;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 8dcb8a49ab67..0c37ddb58c1e 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -566,7 +566,7 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
case P5_INTF_SEL_PHY_P0:
/* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
val |= MHWTRAP_PHY0_SEL;
- /* fall through */
+ fallthrough;
case P5_INTF_SEL_PHY_P4:
/* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 7a71c9902e73..f0dbc05e30a4 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -875,7 +875,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
break;
case STATS_TYPE_BANK1:
reg = bank1_select;
- /* fall through */
+ fallthrough;
case STATS_TYPE_BANK0:
reg |= s->reg | histogram;
mv88e6xxx_g1_stats_read(chip, reg, &low);
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index f121619d81fe..2d23ccef7d0e 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -9,7 +9,7 @@ config NET_DSA_MSCC_FELIX
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
help
- This driver supports network switches from the the Vitesse /
+ This driver supports network switches from the Vitesse /
Microsemi / Microchip Ocelot family of switching cores that are
connected to their host CPU via Ethernet.
The following switches are supported:
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 139d0120f511..667f38c9e4c6 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1259,14 +1259,14 @@ el3_up(struct net_device *dev)
pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
- /* fall through */
+ fallthrough;
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
- /* fall through */
+ fallthrough;
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index ef1c3151fbb2..f66e7fb9a2bb 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -951,7 +951,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- u8 rx, tx, up;
+ u8 up;
pr_debug("%s: updating the statistics.\n", dev->name);
@@ -972,8 +972,8 @@ static void update_stats(struct net_device *dev)
dev->stats.tx_packets += (up&0x30) << 4;
/* Rx packets */ inb(ioaddr + 7);
/* Tx deferrals */ inb(ioaddr + 8);
- rx = inw(ioaddr + 10);
- tx = inw(ioaddr + 12);
+ /* rx */ inw(ioaddr + 10);
+ /* tx */ inw(ioaddr + 12);
EL3WINDOW(4);
/* BadSSD */ inb(ioaddr + 12);
@@ -1046,7 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data->phy_id = phy;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read the specified MII register. */
{
int saved_window;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index aeae7966a082..a00b36f91d9f 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -610,7 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
@@ -898,6 +898,7 @@ static int ax_close(struct net_device *dev)
/**
* axnet_tx_timeout - handle transmit time out condition
* @dev: network device which has apparently fallen asleep
+ * @txqueue: unused
*
* Called by kernel when device never acknowledges a transmit has
* completed (or failed) - i.e. never posted a Tx related interrupt.
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 645efac6310d..164c3ed550bf 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1108,7 +1108,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 6234fcd844ee..696517eae77f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1712,13 +1712,13 @@ static bool slic_is_fiber(unsigned short subdev)
{
switch (subdev) {
/* Mojave */
- case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough;
/* Oasis */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:
return true;
}
return false;
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index ac86fcae1582..8470c836fa18 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -547,7 +547,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name);
break;
}
- /* Fall through */
+ fallthrough;
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 2a6c9725e092..a3a8edf9a734 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2180,13 +2180,10 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
int i;
for (i = first_index; i < first_index + count; i++) {
- /* Check if napi was initialized before */
- if (!ENA_IS_XDP_INDEX(adapter, i) ||
- adapter->ena_napi[i].xdp_ring)
- netif_napi_del(&adapter->ena_napi[i].napi);
- else
- WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ netif_napi_del(&adapter->ena_napi[i].napi);
+
+ WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].xdp_ring);
}
}
@@ -3601,16 +3598,14 @@ static void ena_fw_reset_device(struct work_struct *work)
{
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
- struct pci_dev *pdev = adapter->pdev;
- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- dev_err(&pdev->dev,
- "device reset schedule while reset bit is off\n");
- return;
- }
rtnl_lock();
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+
+ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ ena_destroy_device(adapter, false);
+ ena_restore_device(adapter);
+ }
+
rtnl_unlock();
}
@@ -3692,7 +3687,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
}
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.missed_tx = missed_tx;
+ tx_ring->tx_stats.missed_tx += missed_tx;
u64_stats_update_end(&tx_ring->syncp);
return rc;
@@ -4389,8 +4384,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
- del_timer_sync(&adapter->timer_service);
+ /* Make sure timer and reset routine won't be called after
+ * freeing device resources.
+ */
+ del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
@@ -4558,6 +4556,9 @@ static void ena_keep_alive_wd(void *adapter_data,
tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
u64_stats_update_begin(&adapter->syncp);
+ /* These stats are accumulated by the device, so the counters indicate
+ * all drops since last reset.
+ */
adapter->dev_stats.rx_drops = rx_drops;
adapter->dev_stats.tx_drops = tx_drops;
u64_stats_update_end(&adapter->syncp);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index b6c43b58ed3d..960d483e8997 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1475,7 +1475,7 @@ static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = lp->ext_phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG:
spin_lock_irq(&lp->lock);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 43294a148f8a..4ba75551cb17 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1538,7 +1538,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, any kind of event packet */
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1549,7 +1549,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, Sync packet */
+ fallthrough; /* to PTP v1, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1560,7 +1560,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, Delay_req packet */
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 16a944707ba9..8941ac4df9e3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1631,8 +1631,8 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
hw_atl_ts_reset_set(self, 0);
}
- err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get,
- self, val, val == 1, 10000U, 500000U);
+ err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self,
+ val, val == 1, 10000U, 500000U);
if (err)
return err;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 34d18302b1a3..a5fd161ab5ee 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -217,7 +217,7 @@ static int bgmac_probe(struct bcma_device *core)
/* BCM 471X/535X family */
case BCMA_CHIP_ID_BCM4716:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
+ fallthrough;
case BCMA_CHIP_ID_BCM47162:
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6795b6d95f54..f37f1c58f368 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -131,7 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
switch (bgmac->net_dev->phydev->speed) {
default:
netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n");
- /* fall through */
+ fallthrough;
case SPEED_1000:
val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c8cc14eadbb4..3e8a179f39db 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1337,13 +1337,13 @@ bnx2_set_mac_link(struct bnx2 *bp)
val |= BNX2_EMAC_MODE_PORT_MII_10M;
break;
}
- /* fall through */
+ fallthrough;
case SPEED_100:
val |= BNX2_EMAC_MODE_PORT_MII;
break;
case SPEED_2500:
val |= BNX2_EMAC_MODE_25G_MODE;
- /* fall through */
+ fallthrough;
case SPEED_1000:
val |= BNX2_EMAC_MODE_PORT_GMII;
break;
@@ -1995,26 +1995,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
@@ -7856,7 +7856,7 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = bp->phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u32 mii_regval;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1426c691c7c4..4e85e7dbc2be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -4712,14 +4712,14 @@ static void bnx2x_sync_link(struct link_params *params,
LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_10TFD:
vars->line_speed = SPEED_10;
break;
case LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_100T4:
case LINK_100TXFD:
vars->line_speed = SPEED_100;
@@ -4727,14 +4727,14 @@ static void bnx2x_sync_link(struct link_params *params,
case LINK_1000THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_1000TFD:
vars->line_speed = SPEED_1000;
break;
case LINK_2500THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_2500TFD:
vars->line_speed = SPEED_2500;
break;
@@ -6339,7 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
- /* fall through */
+ fallthrough;
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12508,13 +12508,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7f24d2689fdd..3c543dd7a8f3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8600,11 +8600,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* fall through */
+ fallthrough;
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* fall through */
+ fallthrough;
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 80d250a6d048..e26f4da5a6d7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* fall through */
+ fallthrough;
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* fall through */
+ fallthrough;
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index b4476f44e386..9c2f51f23035 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1809,7 +1809,7 @@ get_vf:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
- /* fall through */
+ fallthrough;
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
@@ -2207,7 +2207,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
- /* Fall through - to release resources */
+ fallthrough; /* to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 31fb5a28e1c4..f92fd8863f48 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1923,7 +1923,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
break;
case BNXT_FW_HEALTH_REG_TYPE_GRC:
reg_off = fw_health->mapped_regs[reg_idx];
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
val = readl(bp->bar0 + reg_off);
break;
@@ -1966,11 +1966,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
}
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -9765,7 +9765,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
mdio->phy_id = bp->link_info.phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 mii_regval = 0;
@@ -11022,7 +11022,7 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
writel(reg_off & BNXT_GRC_BASE_MASK,
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
writel(val, bp->bar0 + reg_off);
break;
@@ -11135,7 +11135,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
}
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_RESET_FW:
bnxt_reset_all(bp);
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
@@ -11158,7 +11158,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
pci_set_master(bp->pdev);
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_POLL_FW:
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
@@ -11173,7 +11173,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
while (!rtnl_trylock()) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 64da654f1038..17934cd87a4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1073,7 +1073,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -1092,7 +1092,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 2704a4709bc7..fcc262064766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -201,10 +201,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* Fall thru */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
- /* Fall thru */
+ fallthrough;
case XDP_DROP:
bnxt_reuse_rx_data(rxr, cons, page);
break;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c5cca63b8571..84536292b031 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3311,7 +3311,7 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
}
case CNIC_CTL_FCOE_STATS_GET_CMD:
ulp_type = CNIC_ULP_FCOE;
- /* fall through */
+ fallthrough;
case CNIC_CTL_ISCSI_STATS_GET_CMD:
cnic_hold(dev);
cnic_copy_ulp_stats(dev, ulp_type);
@@ -4044,7 +4044,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
l4kcqe->status, l5kcqe->completion_status);
opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}
- /* Fall through */
+ fallthrough;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1fecc25767bd..0ca8436d2e9d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1185,10 +1185,10 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
- /* fall through */
+ fallthrough;
case BCMGENET_STAT_MIB_TX:
offset += BCMGENET_STAT_OFFSET;
- /* fall through */
+ fallthrough;
case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 511d553a4d11..6fb6c3556285 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -192,7 +192,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
phy_name = "internal PHY";
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ebff1fc0d8ce..9894594d957d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -715,7 +715,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
- /* fall through */
+ fallthrough;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -776,7 +776,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
- /* fall through */
+ fallthrough;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -1586,7 +1586,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
- /* fall through */
+ fallthrough;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
@@ -2114,7 +2114,7 @@ static int tg3_phy_init(struct tg3 *tp)
phy_support_asym_pause(phydev);
break;
}
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
@@ -4390,7 +4390,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
MII_TG3_DSP_TAP26_RMRXSTO |
MII_TG3_DSP_TAP26_OPCSINPT;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- /* Fall through */
+ fallthrough;
case ASIC_REV_5720:
case ASIC_REV_5762:
if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
@@ -4538,7 +4538,7 @@ static int tg3_phy_pull_config(struct tg3 *tp)
tp->link_config.speed = SPEED_1000;
break;
}
- /* Fall through */
+ fallthrough;
default:
goto done;
}
@@ -5209,7 +5209,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
- /* fall through */
+ fallthrough;
case ANEG_STATE_AN_ENABLE:
ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
if (ap->flags & MR_AN_ENABLE) {
@@ -5239,7 +5239,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ret = ANEG_TIMER_ENAB;
ap->state = ANEG_STATE_RESTART;
- /* fall through */
+ fallthrough;
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
if (delta > ANEG_STATE_SETTLE_TIME)
@@ -5282,7 +5282,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->state = ANEG_STATE_ACK_DETECT;
- /* fall through */
+ fallthrough;
case ANEG_STATE_ACK_DETECT:
if (ap->ack_match != 0) {
if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
@@ -10720,40 +10720,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
- /* fall through */
+ fallthrough;
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
- /* fall through */
+ fallthrough;
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
- /* fall through */
+ fallthrough;
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
- /* fall through */
+ fallthrough;
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
- /* fall through */
+ fallthrough;
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
- /* fall through */
+ fallthrough;
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
- /* fall through */
+ fallthrough;
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
- /* fall through */
+ fallthrough;
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
- /* fall through */
+ fallthrough;
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
- /* fall through */
+ fallthrough;
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
- /* fall through */
+ fallthrough;
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
- /* fall through */
+ fallthrough;
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
@@ -13998,7 +13998,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: {
u32 mii_regval;
@@ -17136,7 +17136,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
break;
}
- /* fallthrough */
+ fallthrough;
case 128:
default:
val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
@@ -17151,28 +17151,28 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
DMA_RWCTRL_WRITE_BNDRY_16);
break;
}
- /* fallthrough */
+ fallthrough;
case 32:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_32 |
DMA_RWCTRL_WRITE_BNDRY_32);
break;
}
- /* fallthrough */
+ fallthrough;
case 64:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_64 |
DMA_RWCTRL_WRITE_BNDRY_64);
break;
}
- /* fallthrough */
+ fallthrough;
case 128:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_128 |
DMA_RWCTRL_WRITE_BNDRY_128);
break;
}
- /* fallthrough */
+ fallthrough;
case 256:
val |= (DMA_RWCTRL_READ_BNDRY_256 |
DMA_RWCTRL_WRITE_BNDRY_256);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 49358d42a0e2..b9dd06b12945 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -321,7 +321,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
del_timer(&ioc->ioc_timer);
- /* fall through */
+ fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
@@ -780,7 +780,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
del_timer(&ioc->iocpf_timer);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
@@ -849,7 +849,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
del_timer(&ioc->iocpf_timer);
- /* fall through*/
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 40107a9bd120..a2c983f56b00 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1084,7 +1084,7 @@ bna_enet_sm_cfg_wait(struct bna_enet *enet,
case ENET_E_CHLD_STOPPED:
bna_enet_rx_start(enet);
- /* Fall through */
+ fallthrough;
case ENET_E_FWRESP_PAUSE:
if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index b5ecbfe13ab0..2623a0da4682 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1636,7 +1636,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
&q1->qpt);
cfg_req->q_cfg[i].qs.rx_buffer_size =
htons((u16)q1->buffer_size);
- /* Fall through */
+ fallthrough;
case BNA_RXP_SINGLE:
/* Large/Single RxQ */
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 31ebf3ee7ec0..283918aeb741 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -460,7 +460,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
- /* fall through */
+ fallthrough;
case HWTSTAMP_TX_ON:
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e73bc211779a..8e0ed01e7f03 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -977,15 +977,14 @@ static void octeon_destroy_resources(struct octeon_device *oct)
schedule_timeout_uninterruptible(HZ / 10);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_HOST_OK:
- /* fallthrough */
case OCT_DEV_CONSOLE_INIT_DONE:
/* Remove any consoles */
octeon_remove_consoles(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IO_QUEUES_DONE:
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
@@ -1027,7 +1026,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_free_sc_done_list(oct);
octeon_free_sc_zombie_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -1062,17 +1061,17 @@ static void octeon_destroy_resources(struct octeon_device *oct)
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
if (OCTEON_CN23XX_PF(oct))
octeon_free_ioq_vector(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MBOX_SETUP_DONE:
if (OCTEON_CN23XX_PF(oct))
oct->fn_list.free_mbox(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/* Wait for any pending operations */
@@ -1095,11 +1094,11 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
@@ -1110,16 +1109,16 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->sriov_info.sriov_enabled)
pci_disable_sriov(oct->pci_dev);
#endif
- /* fallthrough */
+ fallthrough;
case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_MAP_DONE:
refcount = octeon_deregister_device(oct);
@@ -1137,13 +1136,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_BEGIN_STATE:
/* Nothing to be done here either */
break;
@@ -2168,7 +2167,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 90ef21086f27..8c5879e31240 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -460,9 +460,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
schedule_timeout_uninterruptible(HZ / 10);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_HOST_OK:
- /* fallthrough */
case OCT_DEV_IO_QUEUES_DONE:
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
@@ -504,7 +503,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_free_sc_done_list(oct);
octeon_free_sc_zombie_list(oct);
- /* fall through */
+ fallthrough;
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -533,15 +532,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
else
cn23xx_vf_ask_pf_to_do_flr(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
octeon_free_ioq_vector(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MBOX_SETUP_DONE:
oct->fn_list.free_mbox(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
mdelay(100);
@@ -551,11 +550,11 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_delete_droq(oct, i);
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
@@ -563,27 +562,27 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_delete_instr_queue(oct, i);
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_MAP_DONE:
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_BEGIN_STATE:
/* Nothing to be done here either */
break;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 83dabcffc789..c7bdac79299a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -522,7 +522,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case IPV4_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c1378b5c780c..063e560d9c1b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -594,10 +594,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return true;
default:
bpf_warn_invalid_xdp_action(action);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
- /* fall through */
+ fallthrough;
case XDP_DROP:
/* Check if it's a recycled page, if not
* unmap the DMA mapping.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 42c6e9379882..387c357e1b8e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2543,7 +2543,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
!(data->phy_id & 0xe0e0))
data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
data->phy_id & 0x1f);
- /* FALLTHRU */
+ fallthrough;
case SIOCGMIIPHY:
return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
case SIOCCHIOCTL:
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index b3e4118a15e7..9749d1239f58 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -136,7 +136,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
- /* fall through */
+ fallthrough;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index c4864125fe02..a10a6862a9a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -231,7 +231,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
- /* fall through */
+ fallthrough;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index d2b587d1670a..869431a1eedd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2553,19 +2553,22 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
- flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) +
- sizeof(*wr), sizeof(__be64));
+ flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
+ sizeof(__be64));
ndesc = flits_to_desc(flits);
lb = &pi->ethtool_lb;
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
+ __netif_tx_lock(q->txq, smp_processor_id());
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
- if (unlikely(credits < 0))
+ if (unlikely(credits < 0)) {
+ __netif_tx_unlock(q->txq);
return -ENOMEM;
+ }
wr = (void *)&q->q.desc[q->q.pidx];
memset(wr, 0, sizeof(struct tx_desc));
@@ -2598,6 +2601,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ __netif_tx_unlock(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8a56491bb034..fa3367966f4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7656,13 +7656,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
- /* Fall through */
+ fallthrough;
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
- /* Fall through */
+ fallthrough;
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
- /* Fall through */
+ fallthrough;
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index dbe8ee7e0e21..e2fe78e2e242 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -517,7 +517,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
cpl = (void *)p;
}
- /* Fall through */
+ fallthrough;
case CPL_SGE_EGR_UPDATE: {
/*
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 6bc7e7ba38c3..552d89fdf54a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,7 +272,7 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
case ntohs(ETH_P_IPV6):
if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
goto out;
- /* Fall through */
+ fallthrough;
case ntohs(ETH_P_IP):
break;
default:
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 66e67b24a887..62e271aea4a5 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2389,7 +2389,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
if (!netdev) {
dev_err(dev, "Can't allocate ethernet device #%d\n", id);
return -ENOMEM;
@@ -2521,7 +2521,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
}
port->netdev = NULL;
- free_netdev(netdev);
return ret;
}
@@ -2530,7 +2529,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
- free_netdev(port->netdev);
return 0;
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 7f7705138262..5c6c8c5ec747 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -385,7 +385,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
- /* fall through */
+ fallthrough;
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 0ccd9994ad45..f9dd1aa9f2da 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3203,7 +3203,7 @@ srom_map_media(struct net_device *dev)
case SROM_10BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_10BASET:
if (lp->params.fdx && !lp->fdx) return -1;
@@ -3225,7 +3225,7 @@ srom_map_media(struct net_device *dev)
case SROM_100BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_100BASET:
if (lp->params.fdx && !lp->fdx) return -1;
@@ -3239,7 +3239,7 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9db23527275a..3a8659c5da06 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -911,7 +911,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
data->phy_id = 1;
else
return -ENODEV;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5dcc66f60144..5a43be327f58 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1443,7 +1443,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index d6ed1d943762..99cc1c46fb30 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -571,7 +571,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
break;
}
}
- /* fall through */
+ fallthrough;
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 43570f4911ea..fdff3b4723ba 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -945,7 +945,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- /* fall through */
+ fallthrough;
case FQ_TYPE_TX_CONFIRM:
dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
break;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9db2a02fb531..1268996b7030 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -375,7 +375,7 @@ static int dpaa_get_hash_opts(struct net_device *dev,
case UDP_V6_FLOW:
if (priv->keygen_in_use)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case IPV4_FLOW:
case IPV6_FLOW:
case SCTP_V4_FLOW:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 457106e761be..cf5383bb8331 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -376,10 +376,10 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9934421814b4..fb37816a74db 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3715,11 +3715,11 @@ failed_mii_init:
failed_irq:
failed_init:
fec_ptp_stop(pdev);
- if (fep->reg_phy)
- regulator_disable(fep->reg_phy);
failed_reset:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 645764abdaae..bb9887f98841 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -528,7 +528,7 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac,
case 100:
tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
break;
- case 1000: /* fallthrough */
+ case 1000:
default:
tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index c27df153f895..624b2eb6f01d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1344,10 +1344,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
set_rx_dflt_cfg(port, params);
- /* fall through */
+ fallthrough;
case FMAN_PORT_TYPE_TX:
set_tx_dflt_cfg(port, params, &port->dts_params);
- /* fall through */
+ fallthrough;
default:
set_dflt_cfg(port, params);
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b513b8c5c3b5..41dd3d0f3452 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -750,8 +750,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
continue;
err = gfar_parse_group(child, priv, model);
- if (err)
+ if (err) {
+ of_node_put(child);
goto err_grp_init;
+ }
}
} else { /* SQ_SG_MODE */
err = gfar_parse_group(np, priv, model);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index db791f60b884..714b501be7d0 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1348,7 +1348,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;
- /* FALLTHROUGH */
+ fallthrough;
case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UCC_GETH_UPSMR_RMM;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 49624acf2473..4eb50296f653 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -305,7 +305,7 @@ static int __lb_setup(struct net_device *ndev,
break;
case MAC_LOOP_PHY_NONE:
ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
- /* fall through */
+ fallthrough;
case MAC_LOOP_NONE:
if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 87776ce3539b..c2ea0348b2f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2746,7 +2746,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
- /* fall through */
+ fallthrough;
case HNS3_OL4_TYPE_NO_TUN:
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 36575e72a915..d553ed7ee64c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3061,7 +3061,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
* by first decoding the types of errors.
*/
set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
- /* fall through */
+ fallthrough;
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
@@ -3686,12 +3686,10 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* fall through */
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
case HNAE3_GLOBAL_RESET:
- /* fall through */
case HNAE3_IMP_RESET:
ret = hclge_set_rst_done(hdev);
break;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9162856de1b1..e972138a14ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1728,7 +1728,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* hardware completion status should be available by this time */
if (ret) {
dev_err(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
+ "couldn't get reset done status from h/w, timeout!\n");
return ret;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index c6adc776f3c8..16bda7381ba0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -334,19 +334,14 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
static int chip_fault_show(struct devlink_fmsg *fmsg,
struct hinic_fault_event *event)
{
- char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
- "fatal", "reset", "flr", "general", "suggestion"};
- char level_str[FAULT_SHOW_STR_LEN + 1] = {0};
- u8 level;
+ const char * const level_str[FAULT_LEVEL_MAX + 1] = {
+ "fatal", "reset", "flr", "general", "suggestion", "Unknown"};
+ u8 fault_level;
int err;
- level = event->event.chip.err_level;
- if (level < FAULT_LEVEL_MAX)
- strncpy(level_str, fault_level[level], strlen(fault_level[level]));
- else
- strncpy(level_str, "Unknown", strlen("Unknown"));
-
- if (level == FAULT_LEVEL_SERIOUS_FLR) {
+ fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
+ event->event.chip.err_level : FAULT_LEVEL_MAX;
+ if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
(u32)event->event.chip.func_id);
if (err)
@@ -361,7 +356,7 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
if (err)
return err;
- err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str);
+ err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
if (err)
return err;
@@ -381,18 +376,15 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
static int fault_report_show(struct devlink_fmsg *fmsg,
struct hinic_fault_event *event)
{
- char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ const char * const type_str[FAULT_TYPE_MAX + 1] = {
"chip", "ucode", "mem rd timeout", "mem wr timeout",
- "reg rd timeout", "reg wr timeout", "phy fault"};
- char type_str[FAULT_SHOW_STR_LEN + 1] = {0};
+ "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
+ u8 fault_type;
int err;
- if (event->type < FAULT_TYPE_MAX)
- strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type]));
- else
- strncpy(type_str, "Unknown", strlen("Unknown"));
+ fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
- err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str);
+ err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
if (err)
return err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index dc6e645f2689..701eb81e09a7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -504,8 +504,6 @@ enum hinic_fault_type {
FAULT_TYPE_MAX,
};
-#define FAULT_SHOW_STR_LEN 16
-
enum hinic_fault_err_level {
FAULT_LEVEL_FATAL,
FAULT_LEVEL_SERIOUS_RESET,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 0273fb7a9d01..3153d62cc73e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3247,7 +3247,7 @@ static int ehea_mem_notifier(struct notifier_block *nb,
switch (action) {
case MEM_CANCEL_OFFLINE:
pr_info("memory offlining canceled");
- /* Fall through - re-add canceled memory block */
+ fallthrough; /* re-add canceled memory block */
case MEM_ONLINE:
pr_info("memory is going online");
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 06248a7db7f2..c00b9097eeea 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2319,7 +2319,7 @@ static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = dev->phy.address;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG:
data->val_out = emac_mdio_read(ndev, dev->phy.address,
data->reg_num);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 63dde3bcf5bc..664e8ccc88d2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4079,7 +4079,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- fallthrough;
case e1000_pch_tgp:
case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index a62ddd626929..c0c8efe42fce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -981,7 +981,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
__le16 seid;
__le16 vlan_tag;
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index afad5e9f80e0..6ab52cbd697a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1967,6 +1967,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
}
/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Assert whether current HW API version is greater/equal than provided.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+ u16 min)
+{
+ return (aq->api_maj_ver > maj ||
+ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2091,18 +2106,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc &&
- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1)))
- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2199,11 +2212,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
+ if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b5399357a667..2e433fdbf2c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -15463,6 +15463,9 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
+
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4f05f6efe6af..d9c3a6b169f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -718,7 +718,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
case e1000_i354:
case e1000_i210:
case e1000_i211:
- fallthrough;
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7a6f2a0d413f..9593aa4eea36 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5142,6 +5142,8 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+ igc_ptp_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -5158,9 +5160,6 @@ static int igc_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* do hw tstamp init after resetting */
- igc_ptp_init(adapter);
-
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index e67d4655b47e..36c999250fcc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -496,8 +496,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- igc_ptp_reset(adapter);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index e67b1a59ecb7..0fcd82036d4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -193,7 +193,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 832bbb8b05c8..dfcb1767acbb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2205,10 +2205,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(pp->dev, prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
ret = MVNETA_XDP_DROPPED;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index d4a4e241333d..41d935d1aaf6 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1638,7 +1638,7 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
- /* Fallthrough */
+ fallthrough;
case MVPP22_FLOW_IP4:
case MVPP22_FLOW_IP6:
if (info->data & RXH_L2DA)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2a8a5842eaef..6e140d1b8967 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5437,7 +5437,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
}
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- /* Fall-through */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -5451,7 +5451,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 1000baseX_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- /* Fall-through */
+ fallthrough;
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
if (port->comphy ||
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 36953d4f51c7..01a793105599 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -737,7 +737,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
- /* Fall through */
+ fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 5975521a4c86..93c4cf7fedbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1226,8 +1226,8 @@ int otx2_config_npa(struct otx2_nic *pfvf)
if (!hw->pool_cnt)
return -EINVAL;
- qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
- hw->pool_cnt, GFP_KERNEL);
+ qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+ sizeof(struct otx2_pool), GFP_KERNEL);
if (!qset->pool)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index b792f6306a64..6a930351cb23 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2448,7 +2448,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = hw->phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 val = 0;
spin_lock_bh(&hw->phy_lock);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cec8124301c7..344864275ed5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1376,7 +1376,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = PHY_ADDR_MARV;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 val = 0;
@@ -2764,7 +2764,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXCHKSVLAN:
sky2_rx_tag(sky2, length);
- /* fall through */
+ fallthrough;
case OP_RXCHKS:
if (likely(dev->features & NETIF_F_RXCSUM))
sky2_rx_checksum(sky2, status);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 0870fe78ea38..6d2d60675ffd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -228,7 +228,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
if (!MTK_HAS_CAPS(mac->hw->soc->caps,
MTK_GMAC1_TRGMII))
goto err_phy;
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -501,11 +501,11 @@ static void mtk_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
phylink_set(mask, 1000baseT_Half);
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_SGMII:
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RMII:
case PHY_INTERFACE_MODE_REVMII:
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 7a04c626a2aa..bcd166911d44 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -72,7 +72,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET:
MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err);
break;
- case MLXFW_FSM_STATE_ERR_OK: /* fall through */
+ case MLXFW_FSM_STATE_ERR_OK:
case MLXFW_FSM_STATE_ERR_MAX:
MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
break;
@@ -155,7 +155,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED:
MLXFW_REACT_ERR("fw already activated", err);
break;
- case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */
+ case MLXFW_FSM_REACTIVATE_STATUS_OK:
case MLXFW_FSM_REACTIVATE_STATUS_MAX:
MLXFW_REACT_ERR("unexpected error", err);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 08d101138fbe..ec45a03140d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -2289,21 +2289,21 @@ int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
/* Here we need to get the module width according to the module type. */
switch (module_type) {
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
return 8;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
return 1;
default:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 44fa02cbb683..056eeb85be60 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -30,8 +30,8 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
*qsfp = false;
break;
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
*qsfp = true;
break;
@@ -205,7 +205,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 ||
module_rev_id >=
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 3fe878d7c94c..61719ec89808 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -259,8 +259,8 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
*/
fault = 1;
break;
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
case MLXSW_REG_MTBR_INDEX_NA:
default:
fault = 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index fdf9aa8314b2..4186e29119c2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -517,8 +517,8 @@ enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
return MLXSW_REG_SPMS_STATE_FORWARDING;
case BR_STATE_LEARNING:
return MLXSW_REG_SPMS_STATE_LEARNING;
- case BR_STATE_LISTENING: /* fall-through */
- case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
return MLXSW_REG_SPMS_STATE_DISCARDING;
default:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f9ba59641b4d..5240bf11b6c4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -636,11 +636,11 @@ static inline unsigned int
mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
{
switch (type) {
- case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 0521e9d48c45..24f1fd1f8d56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1164,7 +1164,7 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
addr_len = 4;
addr_prefix_len = 32;
break;
- case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON(1);
return NULL;
@@ -4555,14 +4555,14 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
return 0;
}
- /* fall through */
+ fallthrough;
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
case RTN_BLACKHOLE:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
return 0;
- case RTN_UNREACHABLE: /* fall through */
+ case RTN_UNREACHABLE:
case RTN_PROHIBIT:
/* Packets hitting these routes need to be trapped, but
* can do so with a lower priority than packets directed
@@ -5990,7 +5990,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
fib_work->fnh_info.fib_nh);
@@ -6050,7 +6050,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
@@ -6089,7 +6089,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_nh_notifier_info *fnh_info;
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
@@ -6099,7 +6099,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
@@ -6116,8 +6116,8 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
int err;
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
@@ -6136,13 +6136,13 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
mr_cache_hold(fib_work->men_info.mfc);
break;
- case FIB_EVENT_VIF_ADD: /* fall through */
+ case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
dev_hold(fib_work->ven_info.dev);
@@ -6215,13 +6215,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
router = container_of(nb, struct mlxsw_sp_router, fib_nb);
switch (event) {
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
return notifier_from_errno(err);
- case FIB_EVENT_ENTRY_ADD: /* fall through */
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
@@ -7277,7 +7277,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
goto out;
switch (event) {
- case NETDEV_CHANGEMTU: /* fall through */
+ case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 5c959a995199..1d18e41ab255 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1523,12 +1523,12 @@ mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
enum mlxsw_sp_span_trigger_type type;
switch (trigger_entry->trigger) {
- case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
case MLXSW_SP_SPAN_TRIGGER_EGRESS:
type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
break;
- case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */
- case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
case MLXSW_SP_SPAN_TRIGGER_ECN:
type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a26162b08b7d..72912afa6f72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1297,7 +1297,7 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
uip = be32_to_cpu(addr->addr4);
sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
break;
- case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON(1);
return -EOPNOTSUPP;
@@ -2870,7 +2870,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
fdb_info = &switchdev_work->fdb_info;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
- case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
/* These events are only used to potentially update an existing
* SPAN mirror.
@@ -3116,9 +3116,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
@@ -3138,7 +3138,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
*/
dev_hold(dev);
break;
- case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
INIT_WORK(&switchdev_work->work,
mlxsw_sp_switchdev_vxlan_fdb_event_work);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index c533d06fbe3a..dcde496da7fb 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -548,7 +548,7 @@ static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
case TCP_V4_FLOW:case UDP_V4_FLOW:
case TCP_V6_FLOW:case UDP_V6_FLOW:
rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case IPV4_FLOW: case IPV6_FLOW:
rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 867c680f5917..5abb7d2b0a9e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -859,7 +859,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
switch (state) {
case BR_STATE_FORWARDING:
ocelot->bridge_fwd_mask |= BIT(port);
- /* Fallthrough */
+ fallthrough;
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index c2867fe995bc..3de8430ee8c5 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -3081,7 +3081,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = np->phy_addr_external;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
/* The phy_id is not enough to uniquely identify
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 4f1f90f5e178..78eba10300ae 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -3768,20 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 7c50e3dfb9d5..76c51da5b66f 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -296,7 +296,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
break;
}
#endif
- /* fall through */
+ fallthrough;
case AF_INET:
req_sz = sizeof(struct nfp_crypto_req_add_v4);
ipv6 = false;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index ff844e5cc41f..1cbe2c9f3959 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -297,7 +297,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app,
case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
- /* FALLTHROUGH */
+ fallthrough;
default:
return NFP_FL_TUNNEL_NONE;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index a050cb898782..f21cf1f40f98 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -289,7 +289,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
break;
}
- /* fall through */
+ fallthrough;
default:
err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 4651fe417b7f..36356f96661d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -784,7 +784,7 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
if (tunnel_act)
*tunnel_act = true;
- /* fall through */
+ fallthrough;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
memcpy(act_dst + act_off, act_src + act_off, act_len);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index b04b83687fe2..2643ea5948f4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -137,7 +137,7 @@ static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
val;
case NN_LM_MOD_DEC:
lm_dec = true;
- /* fall through */
+ fallthrough;
case NN_LM_MOD_INC:
if (val) {
pr_err("LM offset in inc/dev mode\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 39ee23e8c0bf..21ea22694e47 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1940,10 +1940,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index a486008eb80a..252fe06f58aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -340,12 +340,12 @@ static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
switch (maptype) {
case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
bartok = -1;
- /* FALLTHROUGH */
+ fallthrough;
case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
baract = NFP_CPP_ACTION_RW;
if (act == 0)
act = NFP_CPP_ACTION_RW;
- /* FALLTHROUGH */
+ fallthrough;
case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
break;
default:
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 75f012444796..2260c2403a83 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -213,7 +213,7 @@ u64 nfp_rtsym_size(const struct nfp_rtsym *sym)
return 0;
default:
pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type);
- /* fall through */
+ fallthrough;
case NFP_RTSYM_TYPE_OBJECT:
case NFP_RTSYM_TYPE_FUNCTION:
return sym->size;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index a26966fa40b9..dceec80fd642 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -410,7 +410,7 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
case SPEED_1000 + HALF_DUPLEX:
netdev_dbg(adapter->netdev,
"Half Duplex is not supported at 1000 Mbps\n");
- /* fall through */
+ fallthrough;
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
netdev_dbg(adapter->netdev,
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 647a1431b359..3da075307178 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1356,7 +1356,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = np->phys[0] & 0x1f;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1944bf5264db..26988ad7ec97 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -412,7 +412,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->flags = flags;
- new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
+ new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
GFP_KERNEL);
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
@@ -462,7 +462,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
- new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
+ new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
GFP_KERNEL);
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 66f45fce90fa..c3f50ddbe824 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -153,7 +153,7 @@ skip:
case NETXEN_BRDTYPE_P3_4_GB_MM:
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
- /* fall through */
+ fallthrough;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
@@ -182,7 +182,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
- /* fall through */
+ fallthrough;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
supported |= SUPPORTED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 876743a79c1f..0e4cd8890cff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2046,7 +2046,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
rdma_tasks);
/* no need for break since RoCE coexist with Ethernet */
}
- /* fall through */
+ fallthrough;
case QED_PCI_ETH:
{
struct qed_eth_pf_params *p_params =
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b3c9ebaf2280..b8f076e4e6b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3109,14 +3109,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2558cb680db3..f39f629242a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -761,7 +761,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
kfree(int_params->msix_table);
if (force_mode)
goto out;
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_MSI:
if (cdev->num_hwfns == 1) {
@@ -775,7 +775,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
if (force_mode)
goto out;
}
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_INTA:
int_params->out.int_mode = QED_INT_MODE_INTA;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 5be08f83e0aa..cd882c453394 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1085,7 +1085,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_NOTICE(p_hwfn,
"Unknown WoL configuration %02x\n",
p_hwfn->cdev->wol_config);
- /* Fallthrough */
+ fallthrough;
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
@@ -1365,7 +1365,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
p_link->full_duplex = false;
- /* Fall-through */
+ fallthrough;
case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
p_link->speed = 1000;
break;
@@ -2451,7 +2451,7 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
- /* Fallthrough */
+ fallthrough;
default:
rc = -EINVAL;
}
@@ -3546,7 +3546,7 @@ qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
switch (p_in_params->cmd) {
case DRV_MSG_SET_RESOURCE_VALUE_MSG:
mfw_resc_info.size = p_in_params->resc_max_val;
- /* Fallthrough */
+ fallthrough;
case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
break;
default:
@@ -3823,7 +3823,7 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"Resource unlock request for an already released resource [%d]\n",
p_params->resource);
- /* Fallthrough */
+ fallthrough;
case RESOURCE_OPCODE_RELEASED:
p_params->b_released = true;
break;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0d0e38debbc2..569e2a7a64e5 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1542,7 +1542,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
if (test_bit(QL_LINK_MASTER, &qdev->flags))
ql_port_start(qdev);
qdev->port_link_state = LS_DOWN;
- /* Fall Through */
+ fallthrough;
case LS_DOWN:
if (curr_link_state == LS_UP) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5c2a3acf1e89..b9894d54469c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -353,7 +353,7 @@ skip:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
- /* fall through */
+ fallthrough;
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
@@ -377,7 +377,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
- /* fall through */
+ fallthrough;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 20b1b43a0e39..1166b98d8bb2 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -474,13 +474,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
if (ret)
- return ret;
+ goto disable_clk_axi;
ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
if (ret)
- return ret;
+ goto disable_clk_cfg_ahb;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+ if (ret)
+ goto disable_clk_cfg_ahb;
- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+ return 0;
+
+disable_clk_cfg_ahb:
+ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+ return ret;
}
/* Enable clocks; needs emac_clks_phase1_init to be called before */
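The emac change above converts early returns into a single unwind path, so every clock that was successfully enabled gets disabled again when a later step fails. A compact, self-contained sketch of that goto-unwind pattern follows, with invented resource names and acquire()/release() helpers standing in for clk_prepare_enable()/clk_disable_unprepare(); it is an illustration of the pattern, not the driver's code.

#include <stdio.h>

static int acquire(const char *name, int fail)
{
        if (fail) {
                fprintf(stderr, "failed to acquire %s\n", name);
                return -1;
        }
        printf("acquired %s\n", name);
        return 0;
}

static void release(const char *name)
{
        printf("released %s\n", name);
}

/* Enable three resources; on any failure, undo only what already succeeded. */
static int bring_up(int fail_at)
{
        int ret;

        ret = acquire("axi", fail_at == 1);
        if (ret)
                return ret;

        ret = acquire("cfg_ahb", fail_at == 2);
        if (ret)
                goto disable_axi;

        ret = acquire("high_speed", fail_at == 3);
        if (ret)
                goto disable_cfg_ahb;

        return 0;

disable_cfg_ahb:
        release("cfg_ahb");
disable_axi:
        release("axi");
        return ret;
}

int main(void)
{
        bring_up(3);    /* exercise the unwind: the last step fails */
        return 0;
}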
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index d1da92ac7fbe..fc9e6626db55 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4994,7 +4994,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
flags = PCI_IRQ_LEGACY;
break;
@@ -5137,7 +5137,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fc99e7118e49..42458a46ffaf 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2169,7 +2169,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
rocker_world_fib4_del(rocker, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
rule = fib_work->fr_info.rule;
if (!fib4_rule_default(rule))
@@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
fib_work->event = event;
switch (event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
@@ -2224,7 +2224,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
fib_rule_get(fib_work->fr_info.rule);
@@ -2811,7 +2811,7 @@ static int rocker_switchdev_event(struct notifier_block *unused,
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 21465cb3d60a..7f8b10c49660 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -316,7 +316,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -327,7 +327,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 9729983f4840..c54b7f8243f3 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -142,7 +142,7 @@ static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_locatio
/* Temporarily map new BAR. */
rc = efx_init_io(efx, bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -160,7 +160,7 @@ static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_locatio
/* Put old BAR back. */
rc = efx_init_io(efx, previous_bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, previous_bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -334,7 +334,7 @@ static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
/* Temporarily map BAR. */
rc = efx_init_io(efx, bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -495,7 +495,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx, fcw.bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, fcw.bar));
if (rc)
goto fail;
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 36598d0542ed..19fe86b3b316 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -431,18 +431,18 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
/* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
* and reprobe after reset to avoid removing filters twice
*/
- down_read(&efx->filter_sem);
+ down_write(&efx->filter_sem);
ef100_filter_table_down(efx);
- up_read(&efx->filter_sem);
+ up_write(&efx->filter_sem);
rc = efx_mcdi_reset(efx, reset_type);
if (rc)
return rc;
netif_device_attach(efx->net_dev);
- down_read(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = ef100_filter_table_up(efx);
- up_read(&efx->filter_sem);
+ up_write(&efx->filter_sem);
if (rc)
return rc;
@@ -739,6 +739,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
.fini_dmaq = efx_fini_dmaq,
.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.filter_table_probe = ef100_filter_table_up,
@@ -820,6 +821,7 @@ const struct efx_nic_type ef100_vf_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
.fini_dmaq = efx_fini_dmaq,
.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.filter_table_probe = ef100_filter_table_up,
@@ -979,7 +981,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
* EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
* This is very unlikely to fail.
*/
- if (EFX_MIN_DMAQ_SIZE % reader->value) {
+ if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
+ EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
netif_err(efx, probe, efx->net_dev,
"%s size granularity is %llu, can't guarantee safety\n",
reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
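The ef100_nic hunk above tightens a design-parameter check: before using the firmware-reported granularity as a divisor it rejects zero (which would fault on the modulo) and values larger than the minimum queue size (which the narrowing cast could truncate). The sketch below shows the same validation order with invented names; MIN_QUEUE_SIZE and check_granularity() are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define MIN_QUEUE_SIZE 512u     /* assumed minimum, standing in for the driver's constant */

/* Return 0 if the reported granularity is usable, -1 otherwise. The checks
 * are ordered so the modulo only runs on a nonzero, in-range value. */
static int check_granularity(uint64_t granularity)
{
        if (!granularity || granularity > MIN_QUEUE_SIZE ||
            MIN_QUEUE_SIZE % (uint32_t)granularity) {
                fprintf(stderr, "granularity %llu is unusable\n",
                        (unsigned long long)granularity);
                return -1;
        }
        return 0;
}

int main(void)
{
        check_granularity(0);           /* rejected before it can divide by zero */
        check_granularity(1u << 20);    /* rejected: larger than the queue minimum */
        check_granularity(64);          /* accepted: 512 is a multiple of 64 */
        return 0;
}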
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 13ba1a4f66fc..012925e878f4 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -31,6 +31,11 @@
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
+bool ef100_rx_buf_hash_valid(const u8 *prefix)
+{
+ return PREFIX_FIELD(prefix, RSS_HASH_VALID);
+}
+
static bool check_fcs(struct efx_channel *channel, u32 *prefix)
{
u16 rxclass;
diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h
index f2f266863966..fe45b36458d1 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.h
+++ b/drivers/net/ethernet/sfc/ef100_rx.h
@@ -14,6 +14,7 @@
#include "net_driver.h"
+bool ef100_rx_buf_hash_valid(const u8 *prefix);
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
void ef100_rx_write(struct efx_rx_queue *rx_queue);
void __ef100_rx_packet(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a9808e86068d..daf0c00c1242 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -45,6 +45,14 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
__ef100_rx_packet, __efx_rx_packet,
channel);
}
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+ if (efx->type->rx_buf_hash_valid)
+ return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+ ef100_rx_buf_hash_valid,
+ prefix);
+ return true;
+}
/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS 100
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index db90d94e24c9..a6bae6a234ba 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -957,7 +957,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 332183280a45..fa1ade856b10 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -1049,10 +1049,10 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EF4_RX_PKT_TCP;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EF4_RX_PKT_CSUMMED;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
@@ -1310,7 +1310,7 @@ int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
- /* else fall through */
+ fallthrough;
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
@@ -1983,7 +1983,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
is_full = true;
- /* fall through */
+ fallthrough;
case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2;
@@ -2034,7 +2034,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
EF4_FARCH_FILTER_MAC_WILD);
@@ -2081,7 +2081,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
case EF4_FARCH_FILTER_TCP_FULL:
case EF4_FARCH_FILTER_UDP_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FARCH_FILTER_TCP_WILD:
case EF4_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2;
@@ -2125,7 +2125,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
case EF4_FARCH_FILTER_MAC_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
if (is_full)
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index d07eeaad9bdf..4002f9a3ae90 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1038,10 +1038,10 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
@@ -1316,7 +1316,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
- /* else fall through */
+ fallthrough;
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
@@ -2043,7 +2043,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
is_full = true;
- /* fall through */
+ fallthrough;
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2;
@@ -2094,7 +2094,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
EFX_FARCH_FILTER_MAC_WILD);
@@ -2141,7 +2141,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
case EFX_FARCH_FILTER_TCP_FULL:
case EFX_FARCH_FILTER_UDP_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FARCH_FILTER_TCP_WILD:
case EFX_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2;
@@ -2185,7 +2185,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
case EFX_FARCH_FILTER_MAC_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
if (is_full)
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 5a74d880b733..1523be77b9db 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -140,7 +140,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
switch (encap_type & EFX_ENCAP_TYPES_MASK) {
case EFX_ENCAP_TYPE_VXLAN:
vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
- /* fallthrough */
+ fallthrough;
case EFX_ENCAP_TYPE_GENEVE:
COPY_VALUE(ether_type, ETHER_TYPE);
outer_ip_proto = IPPROTO_UDP;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 56af8b54a864..714d7f937212 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -282,7 +282,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
break;
default:
WARN_ON(1);
- /* Fall through */
+ fallthrough;
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 7bb7ecb480ae..062462a13847 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -846,6 +846,7 @@ struct efx_async_filter_insertion {
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irqs_hooked: Channel interrupts are hooked
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -1004,6 +1005,7 @@ struct efx_nic {
unsigned int timer_quantum_ns;
unsigned int timer_max_ns;
bool irq_rx_adaptive;
+ bool irqs_hooked;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
@@ -1265,6 +1267,7 @@ struct efx_udp_tunnel {
* @rx_write: Write RX descriptors and doorbell
* @rx_defer_refill: Generate a refill reminder event
* @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
* @ev_probe: Allocate resources for event queue
* @ev_init: Initialise event queue on the NIC
* @ev_fini: Deinitialise event queue on the NIC
@@ -1409,6 +1412,7 @@ struct efx_nic_type {
void (*rx_write)(struct efx_rx_queue *rx_queue);
void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
void (*rx_packet)(struct efx_channel *channel);
+ bool (*rx_buf_hash_valid)(const u8 *prefix);
int (*ev_probe)(struct efx_channel *channel);
int (*ev_init)(struct efx_channel *channel);
void (*ev_fini)(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d994d136bb03..d1e908846f5d 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -129,6 +129,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
#endif
}
+ efx->irqs_hooked = true;
return 0;
fail2:
@@ -154,6 +155,8 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
efx->net_dev->rx_cpu_rmap = NULL;
#endif
+ if (!efx->irqs_hooked)
+ return;
if (EFX_INT_MODE_USE_MSI(efx)) {
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
@@ -163,6 +166,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
}
+ efx->irqs_hooked = false;
}
/* Register dump */
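The sfc/nic.c hunk records whether interrupts were actually requested so the teardown path can skip free_irq() when initialisation never got that far, and so a second teardown is harmless. A tiny sketch of that guard-flag pattern follows; dev_state, init_interrupts() and fini_interrupts() are hypothetical names used only for illustration.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
        bool irqs_hooked;       /* set only after the request succeeds */
};

static int init_interrupts(struct dev_state *st, bool request_ok)
{
        if (!request_ok)
                return -1;      /* nothing was hooked, leave the flag clear */
        st->irqs_hooked = true;
        printf("irq requested\n");
        return 0;
}

static void fini_interrupts(struct dev_state *st)
{
        if (!st->irqs_hooked)
                return;         /* never hooked: freeing here would be a bug */
        printf("irq freed\n");
        st->irqs_hooked = false;
}

int main(void)
{
        struct dev_state st = { .irqs_hooked = false };

        init_interrupts(&st, false);    /* simulated request failure */
        fini_interrupts(&st);           /* safely does nothing */

        init_interrupts(&st, true);
        fini_interrupts(&st);           /* frees exactly once */
        return 0;
}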
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 59a43d586967..aaa112877561 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -358,7 +358,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
case XDP_ABORTED:
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
- /* Fall through */
+ fallthrough;
case XDP_DROP:
efx_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_drops++;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fb77c7bbe4af..5e29284c89c9 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -525,7 +525,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
return;
}
- if (efx->net_dev->features & NETIF_F_RXHASH)
+ if (efx->net_dev->features & NETIF_F_RXHASH &&
+ efx_rx_buf_hash_valid(efx, eh))
skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
if (csum) {
@@ -848,6 +849,7 @@ void efx_remove_filters(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id);
+ channel->rps_flow_id = NULL;
}
#endif
down_write(&efx->filter_sem);
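The rx_common.c hunk clears the per-channel pointer after freeing it, so a later teardown pass cannot free or dereference stale memory. A standalone sketch of the free-then-NULL idiom follows; channel_like and remove_filters() are invented names, not the driver's types.

#include <stdio.h>
#include <stdlib.h>

struct channel_like {
        unsigned int *flow_id;  /* hypothetical stand-in for rps_flow_id */
};

/* Teardown that may legitimately run more than once. */
static void remove_filters(struct channel_like *ch)
{
        free(ch->flow_id);
        ch->flow_id = NULL;     /* a second call now frees NULL, which is a no-op */
}

int main(void)
{
        struct channel_like ch = { .flow_id = malloc(16 * sizeof(unsigned int)) };

        remove_filters(&ch);
        remove_filters(&ch);    /* safe: no double free */
        printf("teardown completed twice without double free\n");
        return 0;
}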
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 336105f77313..cfa460c7db23 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2228,7 +2228,7 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = sis_priv->mii->phy_addr;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 186c0bddbe5f..01069dfaf75c 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -712,7 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev)
/* Found an external PHY */
break;
}
- /* Else, fall through */
+ fallthrough;
default:
/* Internal media only */
SMC_GET_PHY_ID1(lp, 1, id1);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 25db667fa879..806eb651cea3 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -919,10 +919,10 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
page = virt_to_head_page(xdp->data);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index d0d2d0fc5f0a..08c76636c164 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -84,9 +84,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
return ERR_PTR(err);
switch (phy_mode) {
- case PHY_INTERFACE_MODE_RGMII: /* Fall through */
- case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */:
- case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */
+ case PHY_INTERFACE_MODE_RGMII:
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 02102c781a8c..bf3250e0e59c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -351,6 +351,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->multicast_filter_bins = 0;
err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (err)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index efc6ec1b8027..fc8759f146c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -164,6 +164,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
} else if (dev->flags & IFF_ALLMULTI) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+ /* Fall back to all multicast if we've no filter */
+ value = GMAC_FRAME_FILTER_PM;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index e113b1376fdd..bf195adee393 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
ret = phy_loopback(dev->phydev, true);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case STMMAC_LOOPBACK_MAC:
ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
break;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
ret = phy_loopback(dev->phydev, false);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case STMMAC_LOOPBACK_MAC:
stmmac_set_mac_loopback(priv, priv->ioaddr, false);
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 3d747846f482..cc27d660a818 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -228,7 +228,7 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
switch (cls->command) {
case TC_CLSU32_REPLACE_KNODE:
tc_unfill_entry(priv, cls);
- /* Fall through */
+ fallthrough;
case TC_CLSU32_NEW_KNODE:
return tc_config_knode(priv, cls);
case TC_CLSU32_DELETE_KNODE:
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index e2bc7a25f6d1..b624e177ec71 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4759,7 +4759,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
- /* Fallthrough... */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
spin_lock_irqsave(&cp->lock, flags);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9b5effb72657..68695d4afacd 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8835,7 +8835,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent)
else
goto unknown_vg_1g_port;
- /* fallthru */
+ fallthrough;
case 0x22:
val = (phy_encode(PORT_TYPE_10G, 0) |
phy_encode(PORT_TYPE_10G, 1) |
@@ -8860,7 +8860,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent)
else
goto unknown_vg_1g_port;
- /* fallthru */
+ fallthrough;
case 0x13:
if ((lowest_10g & 0x7) == 0)
val = (phy_encode(PORT_TYPE_10G, 0) |
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index eeb8518c8a84..8deb943ca5de 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2712,7 +2712,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = gp->mii_phy_addr;
- /* Fallthrough... */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 4e184eecc8e1..6e72ecbe5cf7 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -67,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
dev_warn(priv->dev,
"Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
phy_modes(phy_mode));
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
@@ -122,7 +122,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
dev_warn(priv->dev,
"Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
phy_modes(phy_mode));
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index d6d7a7d9c7ad..482a1a451e43 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1371,10 +1371,10 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
goto drop;
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 58623e974a0c..76a342ea3797 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -948,7 +948,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* get address of MII PHY in use. */
data->phy_id = phy;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* read MII PHY register. */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 2db546b27ee0..dc14a66583ff 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -877,7 +877,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
case IW_AUTH_KEY_MGMT:
if (param->value & IW_AUTH_KEY_MGMT_PSK)
break;
- /* intentionally fall through */
+ fallthrough;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 07389702a540..5f5b33e6653b 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -786,7 +786,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
- /* Fall through */
+ fallthrough;
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
@@ -1397,9 +1397,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
show_error = 0;
break;
- case SPIDER_NET_GDDDEN0INT: /* fallthrough */
- case SPIDER_NET_GDCDEN0INT: /* fallthrough */
- case SPIDER_NET_GDBDEN0INT: /* fallthrough */
+ case SPIDER_NET_GDDDEN0INT:
+ case SPIDER_NET_GDCDEN0INT:
+ case SPIDER_NET_GDBDEN0INT:
case SPIDER_NET_GDADEN0INT:
/* someone has set RX_DMA_EN to 0 */
show_error = 0;
@@ -1449,10 +1449,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
* Logging is not needed. */
show_error = 0;
break;
- case SPIDER_NET_GRFDFLLINT: /* fallthrough */
- case SPIDER_NET_GRFCFLLINT: /* fallthrough */
- case SPIDER_NET_GRFBFLLINT: /* fallthrough */
- case SPIDER_NET_GRFAFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFDFLLINT:
+ case SPIDER_NET_GRFCFLLINT:
+ case SPIDER_NET_GRFBFLLINT:
+ case SPIDER_NET_GRFAFLLINT:
case SPIDER_NET_GRMFLLINT:
/* Could happen when rx chain is full */
if (card->ignore_rx_ramfull == 0) {
@@ -1473,9 +1473,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
break;
/* chain end */
- case SPIDER_NET_GDDDCEINT: /* fallthrough */
- case SPIDER_NET_GDCDCEINT: /* fallthrough */
- case SPIDER_NET_GDBDCEINT: /* fallthrough */
+ case SPIDER_NET_GDDDCEINT:
+ case SPIDER_NET_GDCDCEINT:
+ case SPIDER_NET_GDBDCEINT:
case SPIDER_NET_GDADCEINT:
spider_net_resync_head_ptr(card);
spider_net_refill_rx_chain(card);
@@ -1486,9 +1486,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
break;
/* invalid descriptor */
- case SPIDER_NET_GDDINVDINT: /* fallthrough */
- case SPIDER_NET_GDCINVDINT: /* fallthrough */
- case SPIDER_NET_GDBINVDINT: /* fallthrough */
+ case SPIDER_NET_GDDINVDINT:
+ case SPIDER_NET_GDCINVDINT:
+ case SPIDER_NET_GDBINVDINT:
case SPIDER_NET_GDAINVDINT:
/* Could happen when rx chain is full */
spider_net_resync_head_ptr(card);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 3e3883ad88b0..3e337142b516 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1434,7 +1434,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data->phy_id = 0; /* we have only this address */
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read the specified MII register. */
data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f,
data->reg_num & 0x1f);
diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c
index e9bf42996de8..4eea3408034b 100644
--- a/drivers/net/fddi/skfp/cfm.c
+++ b/drivers/net/fddi/skfp/cfm.c
@@ -36,10 +36,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
@@ -208,7 +204,6 @@ void cfm(struct s_smc *smc, int event)
{
int state ; /* remember last state */
int cond ;
- int oldstate ;
/* We will do the following: */
/* - compute the variable WC_Flag for every port (This is where */
@@ -222,7 +217,6 @@ void cfm(struct s_smc *smc, int event)
/* - change the portstates */
cem_priv_state (smc, event);
- oldstate = smc->mib.fddiSMTCF_State ;
do {
DB_CFM("CFM : state %s%s event %s",
smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
@@ -250,18 +244,11 @@ void cfm(struct s_smc *smc, int event)
if (cond != smc->mib.fddiSMTPeerWrapFlag)
smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
-#if 0
/*
- * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
+ * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
* to the primary path.
*/
- /*
- * path change
- */
- if (smc->mib.fddiSMTCF_State != oldstate) {
- smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
- }
-#endif
+
#endif /* no SLIM_SMT */
/*
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index 02966d141948..4cbb145c74ab 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -21,10 +21,6 @@
#include <linux/bitrev.h>
#include <linux/etherdevice.h>
-#ifndef lint
-static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
-#endif
-
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index 3412e0fb0ac4..107039056511 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -10,10 +10,6 @@
*
******************************************************************************/
-#ifndef lint
-static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ;
-#endif
-
#define HWMTM
#ifndef FDDI
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 1be039579d70..554cde8d6073 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -847,7 +847,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
case ACTIONS(PC5_SIGNAL) :
ACTIONS_DONE() ;
- /* fall through */
+ fallthrough;
case PC5_SIGNAL :
if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
break ;
@@ -946,7 +946,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
ACTIONS_DONE() ;
cmd = 0 ;
- /* fall thru */
+ fallthrough;
case PC6_JOIN :
switch (plc->p_state) {
case PS_ACTIVE:
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index b8c59d803ce6..774a6e3b0a67 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -20,10 +20,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
-#endif
-
/*
* FC in SMbuf
*/
@@ -1561,7 +1557,7 @@ u_long smt_get_tid(struct s_smc *smc)
return tid & 0x3fffffffL;
}
-
+#ifdef LITTLE_ENDIAN
/*
* table of parameter lengths
*/
@@ -1641,6 +1637,7 @@ static const struct smt_pdef {
} ;
#define N_SMT_PLEN ARRAY_SIZE(smt_pdef)
+#endif
int smt_check_para(struct s_smc *smc, struct smt_header *sm,
const u_short list[])
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 8c810edece86..466622664424 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -974,7 +974,7 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
- /* fall through */
+ fallthrough;
case EP_PARTNER_UNSHARE:
case EP_PARTNER_COMPLETE:
default:
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 4476491b58f9..e4e4981ac1d2 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -500,7 +500,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
}
break;
}
- /* fall through */
+ fallthrough;
default:
if (bc->hdlctx.calibrate <= 0)
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index deef14215110..17be2bb2985c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -482,7 +482,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_SMACK_TEST:
ax->crcmode = CRC_MODE_FLEX_TEST;
printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
- // fall through
+ fallthrough;
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
@@ -491,7 +491,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
- // fall through
+ fallthrough;
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
@@ -744,7 +744,6 @@ static int mkiss_open(struct tty_struct *tty)
ax->dev->name);
break;
case 0:
- /* fall through */
default:
crc_force = 0;
printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 787f17e2a971..64b0a74c1523 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -367,7 +367,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
rcu_read_unlock();
- while (unlikely(txq >= ndev->real_num_tx_queues))
+ while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
return txq;
@@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
int rc;
skb->dev = vf_netdev;
- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
+ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index b10a85392952..55115cfb2972 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>
+#include <linux/notifier.h>
#include "ipa_version.h"
#include "gsi.h"
@@ -73,6 +74,8 @@ struct ipa {
enum ipa_version version;
struct platform_device *pdev;
struct rproc *modem_rproc;
+ struct notifier_block nb;
+ void *notifier;
struct ipa_smp2p *smp2p;
struct ipa_clock *clock;
atomic_t suspend_ref;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index ed10818dd99f..e34fe2d77324 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -9,7 +9,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
#include "ipa_data.h"
@@ -311,43 +311,40 @@ static void ipa_modem_crashed(struct ipa *ipa)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
}
-static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
{
- struct ipa *ipa = data;
- struct device *dev;
+ struct ipa *ipa = container_of(nb, struct ipa, nb);
+ struct qcom_ssr_notify_data *notify_data = data;
+ struct device *dev = &ipa->pdev->dev;
- dev = &ipa->pdev->dev;
- switch (event) {
- case MODEM_STARTING:
+ switch (action) {
+ case QCOM_SSR_BEFORE_POWERUP:
dev_info(dev, "received modem starting event\n");
ipa_smp2p_notify_reset(ipa);
break;
- case MODEM_RUNNING:
+ case QCOM_SSR_AFTER_POWERUP:
dev_info(dev, "received modem running event\n");
break;
- case MODEM_STOPPING:
- case MODEM_CRASHED:
+ case QCOM_SSR_BEFORE_SHUTDOWN:
dev_info(dev, "received modem %s event\n",
- event == MODEM_STOPPING ? "stopping"
- : "crashed");
+ notify_data->crashed ? "crashed" : "stopping");
if (ipa->setup_complete)
ipa_modem_crashed(ipa);
break;
- case MODEM_OFFLINE:
+ case QCOM_SSR_AFTER_SHUTDOWN:
dev_info(dev, "received modem offline event\n");
break;
- case MODEM_REMOVING:
- dev_info(dev, "received modem stopping event\n");
- break;
-
default:
- dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+ dev_err(dev, "received unrecognized event %lu\n", action);
break;
}
+
+ return NOTIFY_OK;
}
int ipa_modem_init(struct ipa *ipa, bool modem_init)
@@ -362,13 +359,30 @@ void ipa_modem_exit(struct ipa *ipa)
int ipa_modem_config(struct ipa *ipa)
{
- return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
- ipa);
+ void *notifier;
+
+ ipa->nb.notifier_call = ipa_modem_notify;
+
+ notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
+ if (IS_ERR(notifier))
+ return PTR_ERR(notifier);
+
+ ipa->notifier = notifier;
+
+ return 0;
}
void ipa_modem_deconfig(struct ipa *ipa)
{
- qcom_deregister_ipa_notify(ipa->modem_rproc);
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
+ if (ret)
+ dev_err(dev, "error %d unregistering notifier", ret);
+
+ ipa->notifier = NULL;
+ memset(&ipa->nb, 0, sizeof(ipa->nb));
}
int ipa_modem_setup(struct ipa *ipa)
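
The ipa_modem.c conversion above drops the dedicated IPA callback in favor of a standard notifier_block embedded in struct ipa; the callback recovers the driver state with container_of() and returns NOTIFY_OK. Below is a small userspace sketch of that embedding pattern only. The SSR action value and payload type here are hypothetical stand-ins; the real driver registers through qcom_register_ssr_notifier() and qcom_unregister_ssr_notifier() exactly as the hunk shows.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long action, void *data);
};

struct my_ipa {
	const char *name;
	struct notifier_block nb;	/* embedded, as in struct ipa above */
};

/* hypothetical stand-ins for the SSR action and payload */
enum { SSR_BEFORE_SHUTDOWN = 1 };
struct ssr_notify_data { int crashed; };

static int my_ipa_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct my_ipa *ipa = container_of(nb, struct my_ipa, nb);
	struct ssr_notify_data *nd = data;

	if (action == SSR_BEFORE_SHUTDOWN)
		printf("%s: modem %s\n", ipa->name,
		       nd->crashed ? "crashed" : "stopping");
	return 0;	/* the real callback returns NOTIFY_OK */
}

int main(void)
{
	struct my_ipa ipa = { .name = "ipa0" };
	struct ssr_notify_data nd = { .crashed = 1 };

	ipa.nb.notifier_call = my_ipa_notify;
	/* a real chain walks all registered blocks; call directly here */
	return ipa.nb.notifier_call(&ipa.nb, SSR_BEFORE_SHUTDOWN, &nd);
}
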
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 15e87c097b0b..5bca94c99006 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree(port);
}
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | \
+ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
#define IPVLAN_FEATURES \
- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+ /* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are newly added */
+
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+ dev->features |= IPVLAN_ALWAYS_ON;
+ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
@@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features = netdev_increment_features(ipvlan->phy_dev->features,
+ features, features);
+ features |= IPVLAN_ALWAYS_ON;
+ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+ return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
@@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
ipvlan->dev->gso_max_size = dev->gso_max_size;
ipvlan->dev->gso_max_segs = dev->gso_max_segs;
- netdev_features_change(ipvlan->dev);
+ netdev_update_features(ipvlan->dev);
}
break;
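
The reworked ipvlan_fix_features() above combines several masks; the key step is features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES), which leaves bits outside IPVLAN_FEATURES untouched while limiting the managed bits to the saved sfeatures, after which the always-on set is forced back in and everything else is masked off. A worked example with made-up mask values follows; the netdev_increment_features() pass against the lower device is omitted for brevity.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* stand-ins for the kernel feature masks; values are made up */
	const unsigned long managed   = 0x0ffUL; /* IPVLAN_FEATURES */
	const unsigned long always_on = 0x30fUL; /* IPVLAN_ALWAYS_ON */
	const unsigned long sfeatures = 0x0f0UL; /* saved per-slave request */
	unsigned long requested       = 0xf3cUL; /* incoming features */
	unsigned long f;

	f = requested & (sfeatures | ~managed); /* unmanaged bits pass through */
	f |= always_on;                         /* always-on bits forced back in */
	f &= (managed | always_on);             /* advertise nothing else */

	/* equivalent closed form: managed bits limited to sfeatures,
	 * plus the always-on set */
	assert(f == ((requested & sfeatures & managed) | always_on));
	printf("features = 0x%lx\n", f);
	return 0;
}
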
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 4942f6112e51..c8d803d3616c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -842,7 +842,7 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (!net_eq(dev_net(dev), &init_net))
break;
- /* fall through */
+ fallthrough;
case SIOCGHWTSTAMP:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
@@ -1269,6 +1269,9 @@ static void macvlan_port_destroy(struct net_device *dev)
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct nlattr *nla, *head;
+ int rem, len;
+
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
@@ -1316,6 +1319,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return -EADDRNOTAVAIL;
}
+ if (data[IFLA_MACVLAN_MACADDR_DATA]) {
+ head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
+ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
+ nla_len(nla) != ETH_ALEN)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(nla_data(nla)))
+ return -EADDRNOTAVAIL;
+ }
+ }
+
if (data[IFLA_MACVLAN_MACADDR_COUNT])
return -EINVAL;
@@ -1372,10 +1389,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
nla_for_each_attr(nla, head, len, rem) {
- if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
- nla_len(nla) != ETH_ALEN)
- continue;
-
addr = nla_data(nla);
ret = macvlan_hash_add_source(vlan, addr);
if (ret)
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 44612122338b..f6a97c859f3a 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -597,7 +597,7 @@ int generic_mii_ioctl(struct mii_if_info *mii_if,
switch(cmd) {
case SIOCGMIIPHY:
mii_data->phy_id = mii_if->phy_id;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
mii_data->val_out =
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 7971dc4f54f1..0e9511661601 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -193,7 +193,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
switch (err) {
case 1:
port_count = 1;
- /* fall through */
+ fallthrough;
case 2:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index f32d56ac3e80..deea17a0e79c 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -760,14 +760,14 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
spin_lock_bh(&data->fib_lock);
switch (event) {
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = nsim_fib_rule_event(data, info,
event == FIB_EVENT_RULE_ADD);
break;
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
err = nsim_fib_event(data, info, event);
break;
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 7471a8b90873..307f0ac1287b 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -366,10 +366,10 @@ static int adin_set_edpd(struct phy_device *phydev, u16 tx_interval)
switch (tx_interval) {
case 1000: /* 1 second */
- /* fallthrough */
+ fallthrough;
case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
val |= ADIN1300_NRG_PD_TX_EN;
- /* fallthrough */
+ fallthrough;
case ETHTOOL_PHY_EDPD_NO_TX:
break;
default:
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 50fb7d16b75a..79e67f2fe00a 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -766,13 +766,13 @@ static int decode_evnt(struct dp83640_private *dp83640,
switch (words) {
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
- /* fall through */
+ fallthrough;
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
- /* fall through */
+ fallthrough;
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
- /* fall through */
+ fallthrough;
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
@@ -1409,7 +1409,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
kfree_skb(skb);
return;
}
- /* fall through */
+ fallthrough;
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c4641b1704d6..18d81f43f2a8 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -279,13 +279,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
phy->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
phy->supported);
- /* fall through */
+ fallthrough;
case SPEED_100:
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
phy->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
phy->supported);
- /* fall through */
+ fallthrough;
case SPEED_10:
default:
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index a7610eb55f30..1901ba277413 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -208,13 +208,6 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
MV_V2_TEMP_CTRL_MASK, val);
}
-static void mv3310_hwmon_disable(void *data)
-{
- struct phy_device *phydev = data;
-
- mv3310_hwmon_config(phydev, false);
-}
-
static int mv3310_hwmon_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -238,10 +231,6 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
if (ret)
return ret;
- ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev);
- if (ret)
- return ret;
-
priv->hwmon_dev = devm_hwmon_device_register_with_info(dev,
priv->hwmon_name, phydev,
&mv3310_hwmon_chip_info, NULL);
@@ -426,6 +415,11 @@ static int mv3310_probe(struct phy_device *phydev)
return phy_sfp_probe(phydev, &mv3310_sfp_ops);
}
+static void mv3310_remove(struct phy_device *phydev)
+{
+ mv3310_hwmon_config(phydev, false);
+}
+
static int mv3310_suspend(struct phy_device *phydev)
{
return mv3310_power_down(phydev);
@@ -784,6 +778,7 @@ static struct phy_driver mv3310_drivers[] = {
.read_status = mv3310_read_status,
.get_tunable = mv3310_get_tunable,
.set_tunable = mv3310_set_tunable,
+ .remove = mv3310_remove,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -798,6 +793,7 @@ static struct phy_driver mv3310_drivers[] = {
.read_status = mv3310_read_status,
.get_tunable = mv3310_get_tunable,
.set_tunable = mv3310_set_tunable,
+ .remove = mv3310_remove,
},
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index a4fbf3a4fa97..6bc7406a1ce7 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1738,13 +1738,13 @@ static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
return 0;
}
-/* Trigger a read to the spcified MCB */
+/* Trigger a read to the specified MCB */
static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
}
-/* Trigger a write to the spcified MCB */
+/* Trigger a write to the specified MCB */
static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 79b4f35d151e..735a806045ac 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -355,7 +355,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii_data->phy_id = phydev->mdio.addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
@@ -433,7 +433,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1b9523595839..8adfbad0a1e8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
if (c45_ids)
dev->c45_ids = *c45_ids;
dev->irq = bus->irq[addr];
+
dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
+ device_initialize(&mdiodev->dev);
dev->state = PHY_DOWN;
@@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
ret = phy_request_driver_module(dev, phy_id);
}
- if (!ret) {
- device_initialize(&mdiodev->dev);
- } else {
- kfree(dev);
+ if (ret) {
+ put_device(&mdiodev->dev);
dev = ERR_PTR(ret);
}
@@ -1979,7 +1979,7 @@ static int genphy_setup_master_slave(struct phy_device *phydev)
break;
case MASTER_SLAVE_CFG_MASTER_FORCE:
ctl |= CTL1000_AS_MASTER;
- /* fallthrough */
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
ctl |= CTL1000_ENABLE_MASTER;
break;
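
The phy_device_create() hunk above moves device_initialize() ahead of the code that can fail and switches the error path from kfree() to put_device(), so the embedded device's release callback frees the object exactly once. The sketch below shows that lifetime rule in isolation, with hypothetical refcount helpers standing in for device_initialize() and put_device() so it runs in userspace.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
	void (*release)(struct obj *);
	int payload;
};

static void obj_release(struct obj *o)
{
	printf("release\n");
	free(o);
}

static void obj_init(struct obj *o)	/* like device_initialize() */
{
	o->refcnt = 1;
	o->release = obj_release;
}

static void obj_put(struct obj *o)	/* like put_device() */
{
	if (--o->refcnt == 0)
		o->release(o);
}

static struct obj *obj_create(int want_fail)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	obj_init(o);			/* initialized early, as in the patch */
	if (want_fail) {
		obj_put(o);		/* NOT free(o): release() must run */
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_create(0);

	obj_create(1);			/* error path exercises obj_put() */
	if (o)
		obj_put(o);
	return 0;
}
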
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 32b4bd6a5b55..32f4e8ec96cf 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1905,7 +1905,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1928,7 +1928,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 6900c68260e0..58014feedf6c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -149,7 +149,7 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
port = PORT_TP;
break;
}
- /* fallthrough */
+ fallthrough;
case SFF8024_CONNECTOR_SG: /* guess */
case SFF8024_CONNECTOR_HSSDC_II:
case SFF8024_CONNECTOR_NOSEPARATE:
@@ -301,7 +301,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
case SFF8024_ECC_100GBASE_CR4:
phylink_set(modes, 100000baseCR4_Full);
- /* fallthrough */
+ fallthrough;
case SFF8024_ECC_25GBASE_CR_S:
case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c24b0e83dd32..cf83314c8591 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -552,7 +552,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_temp_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_temp_input:
case hwmon_temp_label:
return 0444;
@@ -571,7 +571,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_in_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_in_input:
case hwmon_in_label:
return 0444;
@@ -590,7 +590,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_curr_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_curr_input:
case hwmon_curr_label:
return 0444;
@@ -618,7 +618,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_power_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_power_input:
case hwmon_power_label:
return 0444;
@@ -1872,7 +1872,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
- /* fall through */
+ fallthrough;
case SFP_MOD_WAITDEV:
/* Ensure that the device is attached before proceeding */
if (sfp->sm_dev_state < SFP_DEV_DOWN)
@@ -1890,7 +1890,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
goto insert;
sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0);
- /* fall through */
+ fallthrough;
case SFP_MOD_HPOWER:
/* Enable high power mode */
err = sfp_sm_mod_hpower(sfp, true);
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index d82016dcde3b..4406b353123e 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -498,7 +498,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev,
*data_p = (c0 >> 3) & 0x0f;
write_data (dev, 0x10); /* send ACK */
*ns_p = PLIP_NB_1;
- /* fall through */
+ fallthrough;
case PLIP_NB_1:
cx = nibble_timeout;
@@ -594,7 +594,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
printk(KERN_DEBUG "%s: receive start\n", dev->name);
rcv->state = PLIP_PK_LENGTH_LSB;
rcv->nibble = PLIP_NB_BEGIN;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_LSB:
if (snd->state != PLIP_PK_DONE) {
@@ -615,7 +615,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
return TIMEOUT;
}
rcv->state = PLIP_PK_LENGTH_MSB;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_receive(nibble_timeout, dev,
@@ -638,7 +638,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
rcv->state = PLIP_PK_DATA;
rcv->byte = 0;
rcv->checksum = 0;
- /* fall through */
+ fallthrough;
case PLIP_PK_DATA:
lbuf = rcv->skb->data;
@@ -651,7 +651,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
rcv->checksum += lbuf[--rcv->byte];
} while (rcv->byte);
rcv->state = PLIP_PK_CHECKSUM;
- /* fall through */
+ fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_receive(nibble_timeout, dev,
@@ -664,7 +664,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
return ERROR;
}
rcv->state = PLIP_PK_DONE;
- /* fall through */
+ fallthrough;
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
@@ -710,7 +710,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev,
case PLIP_NB_BEGIN:
write_data (dev, data & 0x0f);
*ns_p = PLIP_NB_1;
- /* fall through */
+ fallthrough;
case PLIP_NB_1:
write_data (dev, 0x10 | (data & 0x0f));
@@ -725,7 +725,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev,
}
write_data (dev, 0x10 | (data >> 4));
*ns_p = PLIP_NB_2;
- /* fall through */
+ fallthrough;
case PLIP_NB_2:
write_data (dev, (data >> 4));
@@ -814,7 +814,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
&snd->nibble, snd->length.b.lsb))
return TIMEOUT;
snd->state = PLIP_PK_LENGTH_MSB;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_send(nibble_timeout, dev,
@@ -823,7 +823,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
snd->state = PLIP_PK_DATA;
snd->byte = 0;
snd->checksum = 0;
- /* fall through */
+ fallthrough;
case PLIP_PK_DATA:
do {
@@ -835,7 +835,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
snd->checksum += lbuf[--snd->byte];
} while (snd->byte);
snd->state = PLIP_PK_CHECKSUM;
- /* fall through */
+ fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_send(nibble_timeout, dev,
@@ -846,7 +846,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
dev_kfree_skb(snd->skb);
dev->stats.tx_packets++;
snd->state = PLIP_PK_DONE;
- /* fall through */
+ fallthrough;
case PLIP_PK_DONE:
/* Close the connection */
@@ -935,7 +935,7 @@ plip_interrupt(void *dev_id)
switch (nl->connection) {
case PLIP_CN_CLOSING:
netif_wake_queue (dev);
- /* fall through */
+ fallthrough;
case PLIP_CN_NONE:
case PLIP_CN_SEND:
rcv->state = PLIP_PK_TRIGGER;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3c11a77f5709..7959b5c2d11f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1590,10 +1590,10 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(tun->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
break;
@@ -2417,7 +2417,7 @@ static int tun_xdp_one(struct tun_struct *tun,
switch (err) {
case XDP_REDIRECT:
*flush = true;
- /* fall through */
+ fallthrough;
case XDP_TX:
return 0;
case XDP_PASS:
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 7e44110746dd..0717c18015c9 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -333,13 +333,13 @@ static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
switch (speed) {
case SPEED_5000:
aqc111_data->phy_cfg |= AQ_ADV_5G;
- /* fall-through */
+ fallthrough;
case SPEED_2500:
aqc111_data->phy_cfg |= AQ_ADV_2G5;
- /* fall-through */
+ fallthrough;
case SPEED_1000:
aqc111_data->phy_cfg |= AQ_ADV_1G;
- /* fall-through */
+ fallthrough;
case SPEED_100:
aqc111_data->phy_cfg |= AQ_ADV_100M;
/* fall-through */
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d387bc7ac1b6..97ba67042d12 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -858,7 +858,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
default:
dev_warn(&intf->dev,
"Couldn't detect memory size, assuming 32k\n");
- /* fall through */
+ fallthrough;
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 9bdbd7b472a0..dba847f28096 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -97,7 +97,7 @@ static void tx_complete(struct urb *req)
case -ECONNRESET:
case -ESHUTDOWN:
dev->stats.tx_aborted_errors++;
- /* fall through */
+ fallthrough;
default:
dev->stats.tx_errors++;
dev_dbg(&dev->dev, "TX error (%d)\n", status);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 442507f25aad..65b315bc60ab 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3192,7 +3192,7 @@ static void rx_complete(struct urb *urb)
case -EPIPE:
dev->net->stats.rx_errors++;
lan78xx_defer_kevent(dev, EVENT_RX_HALT);
- /* FALLTHROUGH */
+ fallthrough;
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware gone */
netif_dbg(dev, ifdown, dev->net,
@@ -3213,7 +3213,7 @@ static void rx_complete(struct urb *urb)
/* data overrun ... flush fifo? */
case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
- /* FALLTHROUGH */
+ fallthrough;
default:
state = rx_cleanup;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 0ef7e1f443e3..e92cb51a2c77 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -629,7 +629,7 @@ static void write_bulk_callback(struct urb *urb)
return;
default:
netif_info(pegasus, tx_err, net, "TX status %d\n", status);
- /* FALL THROUGH */
+ fallthrough;
case 0:
break;
}
@@ -1009,7 +1009,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = pegasus->phy;
- /* fall through */
+ fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7d39f998535d..b1770489aca5 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1504,7 +1504,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
sa->sa_family = dev->type;
- ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
+ ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
if (ret < 0) {
if (tp->version == RTL_VER_01) {
ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
@@ -1682,7 +1682,7 @@ static void intr_callback(struct urb *urb)
case -ECONNRESET: /* unlink */
case -ESHUTDOWN:
netif_device_detach(tp->netdev);
- /* fall through */
+ fallthrough;
case -ENOENT:
case -EPROTO:
netif_info(tp, intr, tp->netdev,
@@ -3251,7 +3251,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
r8152_mdio_write(tp, MII_BMCR, data);
data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
- /* fall through */
+ fallthrough;
default:
if (data != PHY_STAT_LAN_ON)
@@ -4849,7 +4849,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
tp->ups_info.speed_duplex = NWAY_1000M_FULL;
break;
}
- /* fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index e7c630d37589..733f120c852b 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -843,7 +843,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = dev->phy;
- /* fall through */
+ fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]);
break;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index e45935a5856a..2b2a841cd938 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -110,7 +110,7 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
if (!usb_endpoint_dir_in(&e->desc))
continue;
intr = 1;
- /* FALLTHROUGH */
+ fallthrough;
case USB_ENDPOINT_XFER_BULK:
break;
default:
@@ -628,7 +628,7 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
- // FALLTHROUGH
+ fallthrough;
default:
state = rx_cleanup;
@@ -1530,7 +1530,7 @@ static void usbnet_bh (struct timer_list *t)
continue;
case tx_done:
kfree(entry->urb->sg);
- /* fall through */
+ fallthrough;
case rx_cleanup:
usb_free_urb (entry->urb);
dev_kfree_skb (skb);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e56cd562a664..a475f48d43c4 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -610,10 +610,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
stats->xdp_drops++;
goto err_xdp;
@@ -745,10 +745,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
stats->xdp_drops++;
goto xdp_drop;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fa8fe5ef160..263b005981bd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -724,7 +724,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
@@ -922,10 +922,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
@@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
return;
- speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
- speed));
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
if (ethtool_validate_speed(speed))
vi->speed = speed;
- duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
- duplex));
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
if (ethtool_validate_duplex(duplex))
vi->duplex = duplex;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ca395f9679d0..2818015324b8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -886,7 +886,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
switch (protocol) {
case IPPROTO_TCP:
- ctx->l4_hdr_size = tcp_hdrlen(skb);
+ ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
+ tcp_hdrlen(skb);
break;
case IPPROTO_UDP:
ctx->l4_hdr_size = sizeof(struct udphdr);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index def27afa1c69..1014693a5ceb 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -743,7 +743,7 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
case ESP_V4_FLOW:
if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case SCTP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 7bcee41905cf..3ca4daf63389 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -295,14 +295,13 @@ static int dlci_close(struct net_device *dev)
{
struct dlci_local *dlp;
struct frad_local *flp;
- int err;
netif_stop_queue(dev);
dlp = netdev_priv(dev);
flp = netdev_priv(dlp->slave);
- err = (*flp->deactivate)(dlp->slave, dev);
+ (*flp->deactivate)(dlp->slave, dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index dfc16770458d..386ed2aa31fd 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -230,6 +230,7 @@ static void hdlc_setup_dev(struct net_device *dev)
dev->max_mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 16;
+ dev->needed_headroom = 0;
dev->addr_len = 0;
dev->header_ops = &hdlc_null_ops;
}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index f70336bb6f52..f52b9fed0593 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
int result;
+ /* There should be a pseudo header of 1 byte added by upper layers.
+ * Check to make sure it is there before reading it.
+ */
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- /* X.25 to LAPB */
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
@@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return result;
memcpy(&state(hdlc)->settings, &new_settings, size);
+
+ /* There's no header_ops so hard_header_len should be 0. */
+ dev->hard_header_len = 0;
+ /* When transmitting data:
+ * first we'll remove a pseudo header of 1 byte,
+ * then we'll prepend an LAPB header of at most 3 bytes.
+ */
+ dev->needed_headroom = 3 - 1;
+
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index b2868433718f..8ccd086e06cb 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
if (!netif_running(dev))
goto drop;
+ /* There should be a pseudo header of 1 byte added by upper layers.
+ * Check to make sure it is there before reading it.
+ */
+ if (skb->len < 1)
+ goto drop;
+
switch (skb->data[0]) {
case X25_IFACE_DATA:
break;
@@ -167,7 +173,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
pr_err("lapb_disconnect_request err: %d\n", err);
- /* Fall thru */
+ fallthrough;
default:
goto drop;
}
@@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev)
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
+ dev->hard_header_len = 0;
dev->mtu = 1000;
dev->addr_len = 0;
}
@@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev)
* then this driver prepends a length field of 2 bytes,
* then the underlying Ethernet device prepends its own header.
*/
- ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+ + dev->needed_headroom;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
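
The lapbether change above converts the old hard_header_len arithmetic into needed_headroom, and the comment in the hunk spells out the terms. As a worked example, assuming a plain Ethernet lower device (hard_header_len of 14 bytes, i.e. ETH_HLEN, and no extra needed_headroom of its own), the sum -1 + 3 + 2 + 14 + 0 comes to 18 bytes of headroom:

#include <stdio.h>

int main(void)
{
	int pseudo_hdr_removed = 1;   /* X.25 pseudo header stripped first */
	int lapb_hdr = 3;             /* LAPB header, at most 3 bytes */
	int length_field = 2;         /* length field this driver prepends */
	int eth_hard_header_len = 14; /* ETH_HLEN, assumed Ethernet lower dev */
	int eth_needed_headroom = 0;  /* assumed: no extra lower-dev headroom */

	int needed = -pseudo_hdr_removed + lapb_hdr + length_field +
		     eth_hard_header_len + eth_needed_headroom;

	printf("needed_headroom = %d\n", needed); /* prints 18 */
	return 0;
}
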
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 77ccf3672ede..bc2c1c7fb1a4 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -413,7 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
case SDLA_RET_NO_BUFS:
if (cmd == SDLA_INFORMATION_WRITE)
break;
- /* Else, fall through */
+ fallthrough;
default:
netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 84640a0c13f3..7ee980575208 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -307,6 +307,14 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* There should be a pseudo header of 1 byte added by upper layers.
+ * Check to make sure it is there before reading it.
+ */
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
switch (skb->data[0]) {
case X25_IFACE_DATA:
break;
@@ -322,7 +330,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
if (err != LAPB_OK)
netdev_err(dev, "lapb_disconnect_request error: %d\n",
err);
- /* fall through */
+ fallthrough;
default:
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -752,6 +760,12 @@ static void x25_asy_setup(struct net_device *dev)
dev->type = ARPHRD_X25;
dev->tx_queue_len = 10;
+ /* When transmitting data:
+ * first this driver removes a pseudo header of 1 byte,
+ * then the lapb module prepends an LAPB header of at most 3 bytes.
+ */
+ dev->needed_headroom = 3 - 1;
+
/* New-style flags. */
dev->flags = IFF_NOARP;
}
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4fe7c7e132c4..9afed3b133d3 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -352,7 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
case I2400M_SS_IDLE:
d_printf(1, dev, "entering BS-negotiated idle mode\n");
- /* Fall through */
+ fallthrough;
case I2400M_SS_DISCONNECTING:
case I2400M_SS_DATA_PATH_CONNECTED:
wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 1f7709d24f35..27ab233650d5 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -135,7 +135,7 @@ retry:
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index 3a0e7226768a..3ba9d70cca1b 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -136,7 +136,7 @@ retry:
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 9659f9e1aaa6..b684e97ac976 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -195,7 +195,7 @@ retry:
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
index 7948a2da195a..2ff00800d45b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
@@ -150,17 +150,17 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
-static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
+static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, const u8 __iomem *addr)
{
return ioread8(addr);
}
-static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, __le16 __iomem *addr)
+static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, const __le16 __iomem *addr)
{
return ioread16(addr);
}
-static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, __le32 __iomem *addr)
+static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, const __le32 __iomem *addr)
{
return ioread32(addr);
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 6b7532f7c936..ff96f22648ef 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -393,7 +393,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
seq_puts(m, "Hash Algorithm: NONE\n");
- /* FALLTHRU */
+ fallthrough;
default:
return;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7e62a6ee7622..f1c1624cec8f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -448,7 +448,7 @@ static void frontend_changed(struct xenbus_device *dev,
set_backend_state(be, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
set_backend_state(be, XenbusStateClosed);
device_unregister(&dev->dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 458be6882b98..3e9895bec15f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2341,7 +2341,7 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 346e084387f7..f7464bd6d57c 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2321,7 +2321,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
break;
}
- /* fall through */
+ fallthrough;
default:
/* jumbo frame ? */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
@@ -2448,7 +2448,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
break;
}
- /* fall through */
+ fallthrough;
default:
skb_put_u8(skb, 1); /*TG*/
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 0b9ca6d20ffa..8874d605b14f 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -611,7 +611,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb,
switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) {
case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU:
pr_err("Received a ACK/NACK PDU\n");
- /* fall through */
+ fallthrough;
case ST21NFCA_NFC_DEP_PFB_I_PDU:
info->dep_info.curr_nfc_dep_pni =
ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index e46adaac1c63..3bd97c73f983 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -1153,7 +1153,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
dev_err(trf->dev, "%s - Invalid request: %d %d\n",
__func__, trf->state, on);
ret = -EINVAL;
- /* FALLTHROUGH */
+ fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
@@ -1960,7 +1960,7 @@ static void trf7970a_shutdown(struct trf7970a *trf)
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
case TRF7970A_ST_LISTENING:
trf7970a_send_err_upstream(trf, -ECANCELED);
- /* FALLTHROUGH */
+ fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
trf7970a_switch_rf_off(trf);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 423f9b8fbbcf..3185efeab487 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -1205,7 +1205,7 @@ int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
ndev->peer_reg->spad);
}
-static u64 xeon_db_ioread(void __iomem *mmio)
+static u64 xeon_db_ioread(const void __iomem *mmio)
{
return (u64)ioread16(mmio);
}
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h
index 2bc5d8356045..dea93989942d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h
@@ -91,7 +91,7 @@
#define GEN3_DB_TOTAL_SHIFT 33
#define GEN3_SPAD_COUNT 16
-static inline u64 gen3_db_ioread(void __iomem *mmio)
+static inline u64 gen3_db_ioread(const void __iomem *mmio)
{
return ioread64(mmio);
}
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index d61fcd91714b..05e2335c9596 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -103,7 +103,7 @@ struct intel_ntb_dev;
struct intel_ntb_reg {
int (*poll_link)(struct intel_ntb_dev *ndev);
int (*link_is_up)(struct intel_ntb_dev *ndev);
- u64 (*db_ioread)(void __iomem *mmio);
+ u64 (*db_ioread)(const void __iomem *mmio);
void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
unsigned long ntb_ctl;
resource_size_t db_size;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e6d1f5b298f3..4a02561cfb96 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1483,7 +1483,7 @@ static void ntb_rx_copy_callback(void *data,
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
- /* fall through */
+ fallthrough;
case DMA_TRANS_ABORTED:
{
struct ntb_transport_qp *qp = entry->qp;
@@ -1739,7 +1739,7 @@ static void ntb_tx_copy_callback(void *data,
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
- /* fall through */
+ fallthrough;
case DMA_TRANS_ABORTED:
{
void __iomem *offset =
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 412d21d8f643..0ff610e728ff 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
- unsigned int len;
- len = hpage_nr_pages(page) * PAGE_SIZE;
- rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
+ rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
if (rc == 0)
page_endio(page, op_is_write(op), 0);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 09087c38fabd..955265656b96 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
dimm_name = "bus";
}
+ /* Validate command family support against bus declared support */
if (cmd == ND_CMD_CALL) {
+ unsigned long *mask;
+
if (copy_from_user(&pkg, p, sizeof(pkg)))
return -EFAULT;
+
+ if (nvdimm) {
+ if (pkg.nd_family > NVDIMM_FAMILY_MAX)
+ return -EINVAL;
+ mask = &nd_desc->dimm_family_mask;
+ } else {
+ if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
+ return -EINVAL;
+ mask = &nd_desc->bus_family_mask;
+ }
+
+ if (!test_bit(pkg.nd_family, mask))
+ return -EINVAL;
}
if (!desc ||
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index fe9bd6febdd2..c21ba0602029 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -4,6 +4,7 @@
*/
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
+#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
@@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ switch (cap) {
+ case NVDIMM_FWA_CAP_QUIESCE:
+ return sprintf(buf, "quiesce\n");
+ case NVDIMM_FWA_CAP_LIVE:
+ return sprintf(buf, "live\n");
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+ enum nvdimm_fwa_state state;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ state = nd_desc->fw_ops->activate_state(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case NVDIMM_FWA_IDLE:
+ return sprintf(buf, "idle\n");
+ case NVDIMM_FWA_BUSY:
+ return sprintf(buf, "busy\n");
+ case NVDIMM_FWA_ARMED:
+ return sprintf(buf, "armed\n");
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ return sprintf(buf, "overflow\n");
+ default:
+ return -ENXIO;
+ }
+}
+
+static int exec_firmware_activate(void *data)
+{
+ struct nvdimm_bus_descriptor *nd_desc = data;
+
+ return nd_desc->fw_ops->activate(nd_desc);
+}
+
+static ssize_t activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_state state;
+ bool quiesce;
+ ssize_t rc;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ if (sysfs_streq(buf, "live"))
+ quiesce = false;
+ else if (sysfs_streq(buf, "quiesce"))
+ quiesce = true;
+ else
+ return -EINVAL;
+
+ nvdimm_bus_lock(dev);
+ state = nd_desc->fw_ops->activate_state(nd_desc);
+
+ switch (state) {
+ case NVDIMM_FWA_BUSY:
+ rc = -EBUSY;
+ break;
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ if (quiesce)
+ rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
+ else
+ rc = nd_desc->fw_ops->activate(nd_desc);
+ break;
+ case NVDIMM_FWA_IDLE:
+ default:
+ rc = -ENXIO;
+ }
+ nvdimm_bus_unlock(dev);
+
+ if (rc == 0)
+ rc = len;
+ return rc;
+}
+
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+
+ /*
+ * Both 'activate' and 'capability' disappear when no ops are
+ * detected, or a negative capability is indicated.
+ */
+ if (!nd_desc->fw_ops)
+ return 0;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return 0;
+
+ return a->mode;
+}
+static struct attribute *nvdimm_bus_firmware_attributes[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_capability.attr,
+ NULL,
+};
+
+static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
+ .name = "firmware",
+ .attrs = nvdimm_bus_firmware_attributes,
+ .is_visible = nvdimm_bus_firmware_visible,
+};
+
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
&nvdimm_bus_attribute_group,
+ &nvdimm_bus_firmware_attribute_group,
NULL,
};
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index b7b77e8d9027..b59032e0859b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -363,14 +363,14 @@ __weak ssize_t security_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
+ if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
+ return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
- if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
- return sprintf(buf, "overwrite\n");
return -ENOTTY;
}
@@ -446,9 +446,125 @@ static const struct attribute_group nvdimm_attribute_group = {
.is_visible = nvdimm_visible,
};
+static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_result result;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ result = nvdimm->fw_ops->activate_result(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (result) {
+ case NVDIMM_FWA_RESULT_NONE:
+ return sprintf(buf, "none\n");
+ case NVDIMM_FWA_RESULT_SUCCESS:
+ return sprintf(buf, "success\n");
+ case NVDIMM_FWA_RESULT_FAIL:
+ return sprintf(buf, "fail\n");
+ case NVDIMM_FWA_RESULT_NOTSTAGED:
+ return sprintf(buf, "not_staged\n");
+ case NVDIMM_FWA_RESULT_NEEDRESET:
+ return sprintf(buf, "need_reset\n");
+ default:
+ return -ENXIO;
+ }
+}
+static DEVICE_ATTR_ADMIN_RO(result);
+
+static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_state state;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ state = nvdimm->fw_ops->activate_state(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (state) {
+ case NVDIMM_FWA_IDLE:
+ return sprintf(buf, "idle\n");
+ case NVDIMM_FWA_BUSY:
+ return sprintf(buf, "busy\n");
+ case NVDIMM_FWA_ARMED:
+ return sprintf(buf, "armed\n");
+ default:
+ return -ENXIO;
+ }
+}
+
+static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_trigger arg;
+ int rc;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ if (sysfs_streq(buf, "arm"))
+ arg = NVDIMM_FWA_ARM;
+ else if (sysfs_streq(buf, "disarm"))
+ arg = NVDIMM_FWA_DISARM;
+ else
+ return -EINVAL;
+
+ nvdimm_bus_lock(dev);
+ rc = nvdimm->fw_ops->arm(nvdimm, arg);
+ nvdimm_bus_unlock(dev);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static struct attribute *nvdimm_firmware_attributes[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_result.attr,
+ NULL,
+};
+
+static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_capability cap;
+
+ if (!nd_desc->fw_ops)
+ return 0;
+ if (!nvdimm->fw_ops)
+ return 0;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group nvdimm_firmware_attribute_group = {
+ .name = "firmware",
+ .attrs = nvdimm_firmware_attributes,
+ .is_visible = nvdimm_firmware_visible,
+};
+
static const struct attribute_group *nvdimm_attribute_groups[] = {
&nd_device_attribute_group,
&nvdimm_attribute_group,
+ &nvdimm_firmware_attribute_group,
NULL,
};
@@ -467,7 +583,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops)
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -497,6 +614,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
dev->devt = MKDEV(nvdimm_major, nvdimm->id);
dev->groups = groups;
nvdimm->sec.ops = sec_ops;
+ nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0;
INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
/*
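
Each DIMM grows a matching 'firmware/' group with 'activate' (accepting "arm"/"disarm") and 'result'. A small userspace sketch that arms every DIMM before the bus-level activation is triggered; the nmem* glob and sysfs layout are assumptions, not part of this patch:

/* Hedged sketch: arm all DIMMs; the sysfs paths are assumed. */
#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	size_t i;

	if (glob("/sys/bus/nd/devices/nmem*/firmware/activate", 0, NULL, &g))
		return 1;

	for (i = 0; i < g.gl_pathc; i++) {
		FILE *f = fopen(g.gl_pathv[i], "w");

		if (!f)
			continue;
		/* activate_store() above maps "arm"/"disarm" to fw_ops->arm(). */
		fputs("arm", f);
		fclose(f);
	}
	globfree(&g);
	return 0;
}

After activation, reading the per-DIMM 'result' attribute reports one of none/success/fail/not_staged/need_reset, as enumerated in result_show().
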
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index ae155e860fdc..6da67f4d641a 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1309,7 +1309,7 @@ static ssize_t resource_show(struct device *dev,
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index ddb9d97d9129..564faa36a3ca 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -45,6 +45,7 @@ struct nvdimm {
struct kernfs_node *overwrite_state;
} sec;
struct delayed_work dwork;
+ const struct nvdimm_fw_ops *fw_ops;
};
static inline unsigned long nvdimm_security_flags(
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 34db557dbad1..3e11ef8d3f5b 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -218,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 94790e6e0e4c..fab29b514372 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
blk_status_t rc;
if (op_is_write(op))
- rc = pmem_do_write(pmem, page, 0, sector,
- hpage_nr_pages(page) * PAGE_SIZE);
+ rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
else
- rc = pmem_do_read(pmem, page, 0, sector,
- hpage_nr_pages(page) * PAGE_SIZE);
+ rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
/*
* The ->rw_page interface is subtle and tricky. The core
* retries on any error, so we can only invoke page_endio() in
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index c3237c2b03a6..ef23119db574 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -605,7 +605,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4cef69bd3c1b..4b80150e4afa 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
else
dev_dbg(&nvdimm->dev, "overwrite completed\n");
- if (nvdimm->sec.overwrite_state)
- sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+ /*
+ * Mark the overwrite work done and update dimm security flags,
+ * then send a sysfs event notification to wake up userspace
+ * poll threads to pick up the changed state.
+ */
nvdimm->sec.overwrite_tmo = 0;
clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
- put_device(&nvdimm->dev);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ if (nvdimm->sec.overwrite_state)
+ sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+ put_device(&nvdimm->dev);
}
void nvdimm_security_overwrite_query(struct work_struct *work)
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 5e3d07b47e0c..726c7354d465 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -58,9 +58,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
goto out_err;
}
- virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
start, &vpmem->start);
- virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
size, &vpmem->size);
res.start = vpmem->start;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 88cff309d8e4..154942fc64eb 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -241,17 +241,6 @@ static blk_status_t nvme_error_status(u16 status)
}
}
-static inline bool nvme_req_needs_retry(struct request *req)
-{
- if (blk_noretry_request(req))
- return false;
- if (nvme_req(req)->status & NVME_SC_DNR)
- return false;
- if (nvme_req(req)->retries >= nvme_max_retries)
- return false;
- return true;
-}
-
static void nvme_retry_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -268,34 +257,67 @@ static void nvme_retry_req(struct request *req)
blk_mq_delay_kick_requeue_list(req->q, delay);
}
-void nvme_complete_rq(struct request *req)
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+};
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
- trace_nvme_complete_rq(req);
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
- nvme_cleanup_cmd(req);
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
+ }
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ return RETRY;
+}
- if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
- return;
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (!blk_queue_dying(req->q)) {
- nvme_retry_req(req);
- return;
- }
- } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- req_op(req) == REQ_OP_ZONE_APPEND) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- }
nvme_trace_bio_complete(req, status);
blk_mq_end_request(req, status);
}
+
+void nvme_complete_rq(struct request *req)
+{
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
+ if (nvme_req(req)->ctrl->kas)
+ nvme_req(req)->ctrl->comp_seen = true;
+
+ switch (nvme_decide_disposition(req)) {
+ case COMPLETE:
+ nvme_end_req(req);
+ return;
+ case RETRY:
+ nvme_retry_req(req);
+ return;
+ case FAILOVER:
+ nvme_failover_req(req);
+ return;
+ }
+}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
@@ -330,7 +352,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -340,7 +362,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -350,7 +372,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -361,7 +383,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -371,7 +393,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
case NVME_CTRL_DEAD:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -380,7 +402,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_DELETING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -2075,7 +2097,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
}
}
- if (iob)
+ if (iob && !blk_queue_is_zoned(ns->queue))
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
@@ -2965,14 +2987,14 @@ static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi)
{
struct nvme_cel *cel, *ret = NULL;
- spin_lock(&ctrl->lock);
+ spin_lock_irq(&ctrl->lock);
list_for_each_entry(cel, &ctrl->cels, entry) {
if (cel->csi == csi) {
ret = cel;
break;
}
}
- spin_unlock(&ctrl->lock);
+ spin_unlock_irq(&ctrl->lock);
return ret;
}
@@ -2999,9 +3021,9 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
cel->csi = csi;
- spin_lock(&ctrl->lock);
+ spin_lock_irq(&ctrl->lock);
list_add_tail(&cel->entry, &ctrl->cels);
- spin_unlock(&ctrl->lock);
+ spin_unlock_irq(&ctrl->lock);
out:
*log = &cel->log;
return 0;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index eae43bb444e0..a7f474ddfff7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2035,7 +2035,7 @@ done:
}
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!nvme_end_request(rq, status, result))
+ if (!nvme_try_complete_req(rq, status, result))
nvme_fc_complete_rq(rq);
check_error:
@@ -2078,7 +2078,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
dev_err(ctrl->dev,
"FCP Op failed - cmdiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
goto out_on_error;
}
@@ -2088,7 +2088,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
dev_err(ctrl->dev,
"FCP Op failed - rspiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
}
atomic_set(&op->state, FCPOP_STATE_IDLE);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 3ded54d2c9c6..d4ba736c6c89 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -65,51 +65,30 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
}
}
-bool nvme_failover_req(struct request *req)
+void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status;
+ u16 status = nvme_req(req)->status & 0x7ff;
unsigned long flags;
- switch (status & 0x7ff) {
- case NVME_SC_ANA_TRANSITION:
- case NVME_SC_ANA_INACCESSIBLE:
- case NVME_SC_ANA_PERSISTENT_LOSS:
- /*
- * If we got back an ANA error we know the controller is alive,
- * but not ready to serve this namespaces. The spec suggests
- * we should update our general state here, but due to the fact
- * that the admin and I/O queues are not serialized that is
- * fundamentally racy. So instead just clear the current path,
- * mark the the path as pending and kick of a re-read of the ANA
- * log page ASAP.
- */
- nvme_mpath_clear_current_path(ns);
- if (ns->ctrl->ana_log_buf) {
- set_bit(NVME_NS_ANA_PENDING, &ns->flags);
- queue_work(nvme_wq, &ns->ctrl->ana_work);
- }
- break;
- case NVME_SC_HOST_PATH_ERROR:
- case NVME_SC_HOST_ABORTED_CMD:
- /*
- * Temporary transport disruption in talking to the controller.
- * Try to send on a new path.
- */
- nvme_mpath_clear_current_path(ns);
- break;
- default:
- /* This was a non-ANA error so follow the normal error path. */
- return false;
+ nvme_mpath_clear_current_path(ns);
+
+ /*
+ * If we got back an ANA error, we know the controller is alive but not
+ * ready to serve this namespace. Kick off a re-read of the ANA
+ * information page, and just try any other available path for now.
+ */
+ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ blk_mq_end_request(req, 0);
kblockd_schedule_work(&ns->head->requeue_work);
- return true;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
@@ -233,7 +212,7 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
int node, struct nvme_ns *old)
{
- struct nvme_ns *ns, *found, *fallback = NULL;
+ struct nvme_ns *ns, *found = NULL;
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
@@ -252,18 +231,22 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
goto out;
}
if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
- fallback = ns;
+ found = ns;
}
- /* No optimized path found, re-check the current path */
+ /*
+ * The loop above skips the current path for round-robin semantics.
+ * Fall back to the current path if either:
+ * - no other optimized path found and current is optimized,
+ * - no other usable path found and current is usable.
+ */
if (!nvme_path_is_disabled(old) &&
- old->ana_state == NVME_ANA_OPTIMIZED) {
- found = old;
- goto out;
- }
- if (!fallback)
+ (old->ana_state == NVME_ANA_OPTIMIZED ||
+ (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
+ return old;
+
+ if (!found)
return NULL;
- found = fallback;
out:
rcu_assign_pointer(head->current_path[node], found);
return found;
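
The fallback comment above boils down to a small predicate: reuse the old path only when it is not disabled and is either optimized, or non-optimized while the scan found no alternative. A compact restatement of just that decision (a sketch, with the ANA state reduced to booleans):

/* Hedged sketch: the round-robin fallback condition as a pure predicate. */
#include <assert.h>
#include <stdbool.h>

static bool reuse_old_path(bool old_disabled, bool old_optimized,
			   bool old_nonoptimized, bool found_nonopt_alternative)
{
	return !old_disabled &&
	       (old_optimized ||
		(!found_nonopt_alternative && old_nonoptimized));
}

int main(void)
{
	/* An optimized current path wins over a non-optimized alternative. */
	assert(reuse_old_path(false, true, false, true));
	/* A non-optimized current path is only a last resort. */
	assert(!reuse_old_path(false, false, true, true));
	assert(reuse_old_path(false, false, true, false));
	return 0;
}
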
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ebb8c3ed3885..e9cf29449dd1 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -523,7 +523,31 @@ static inline u32 nvme_bytes_to_numd(size_t len)
return (len >> 2) - 1;
}
-static inline bool nvme_end_request(struct request *req, __le16 status,
+static inline bool nvme_is_ana_error(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+ /* check for a status code type of 'path related status' */
+ return (status & 0x700) == 0x300;
+}
+
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so. If not, let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
@@ -629,7 +653,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
-bool nvme_failover_req(struct request *req);
+void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -688,9 +712,8 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
-static inline bool nvme_failover_req(struct request *req)
+static inline void nvme_failover_req(struct request *req)
{
- return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
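
The nvme_is_ana_error()/nvme_is_path_error() helpers above operate on the status value as stored in nvme_req(req)->status, where bits 10:8 carry the Status Code Type and SCT 3h is "path related status". A standalone check of the masking, assuming the status code values from include/linux/nvme.h (0x301-0x303 for the ANA codes, 0x370/0x371 for the host path errors); those constants are not part of this hunk:

/* Hedged sketch: replicate the SCT/SC masking outside the kernel. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed values, mirroring include/linux/nvme.h. */
#define NVME_SC_ANA_PERSISTENT_LOSS	0x301
#define NVME_SC_ANA_INACCESSIBLE	0x302
#define NVME_SC_ANA_TRANSITION		0x303
#define NVME_SC_HOST_PATH_ERROR		0x370
#define NVME_SC_HOST_ABORTED_CMD	0x371

static bool is_path_error(uint16_t status)
{
	return (status & 0x700) == 0x300;	/* SCT == 3h */
}

static bool is_ana_error(uint16_t status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	assert(is_path_error(NVME_SC_HOST_PATH_ERROR));
	assert(is_path_error(NVME_SC_ANA_TRANSITION));	 /* ANA errors are path errors too */
	assert(!is_ana_error(NVME_SC_HOST_ABORTED_CMD)); /* ...but not every path error is ANA */
	return 0;
}

This is why nvme_decide_disposition() only needs the path-error check for the FAILOVER case: the ANA codes fall inside SCT 3h and are caught by the same test.
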
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ba725ae47305..9aa948782704 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -120,7 +120,7 @@ struct nvme_dev {
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
- u16 q_depth;
+ u32 q_depth;
int io_sqes;
u32 db_stride;
void __iomem *bar;
@@ -157,13 +157,13 @@ struct nvme_dev {
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
int ret;
- u16 n;
+ u32 n;
- ret = kstrtou16(val, 10, &n);
+ ret = kstrtou32(val, 10, &n);
if (ret != 0 || n < 2)
return -EINVAL;
- return param_set_ushort(val, kp);
+ return param_set_uint(val, kp);
}
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -195,7 +195,7 @@ struct nvme_queue {
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
- u16 q_depth;
+ u32 q_depth;
u16 cq_vector;
u16 sq_tail;
u16 cq_head;
@@ -961,7 +961,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- if (!nvme_end_request(req, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result))
nvme_pci_complete_rq(req);
}
@@ -1244,7 +1244,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- /* fall through */
+ fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
@@ -2320,7 +2320,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
- dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
@@ -2460,7 +2460,8 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- PAGE_SIZE, PAGE_SIZE, 0);
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 44c76ffbb264..eb1d54af7c77 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1189,7 +1189,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
if (!refcount_dec_and_test(&req->ref))
return;
- if (!nvme_end_request(rq, req->status, req->result))
+ if (!nvme_try_complete_req(rq, req->status, req->result))
nvme_rdma_complete_rq(rq);
}
@@ -1915,7 +1915,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 62fbaecdc960..a44d8ace3a81 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -481,7 +481,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_end_request(rq, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_complete_rq(rq);
queue->nr_cqe++;
@@ -672,7 +672,7 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
- if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
nvme_complete_rq(rq);
}
@@ -866,7 +866,6 @@ static void nvme_tcp_state_change(struct sock *sk)
case TCP_LAST_ACK:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
- /* fallthrough */
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
break;
default:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 74b2b61c773b..37e1d7784e17 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1136,6 +1136,7 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
up_write(&nvmet_config_sem);
kfree_rcu(new_model, rcuhead);
+ kfree(new_model_number);
return count;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b92f45f5cd5b..b7b63330b5ef 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -73,7 +73,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
status = NVME_SC_ACCESS_DENIED;
break;
case -EIO:
- /* FALLTHRU */
+ fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -397,6 +397,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -406,6 +409,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
cancel_delayed_work_sync(&ctrl->ka_work);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c97e60b71bbc..3da067a8311e 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -812,7 +812,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
/* Fall-Thru to RSP handling */
- /* FALLTHRU */
+ fallthrough;
case NVMET_FCOP_RSP:
if (fcpreq) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3dd6f566a240..125dde3f410e 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -139,7 +139,6 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
req->error_loc = offsetof(struct nvme_rw_command, nsid);
break;
case BLK_STS_IOERR:
- /* fallthru */
default:
status = NVME_SC_INTERNAL | NVME_SC_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4884ef1e46a2..0d6008cf66a2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
return;
}
- if (!nvme_end_request(rq, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_loop_complete_rq(rq);
}
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 89d91dc999a6..8bd7f656e240 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -165,7 +165,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static void nvmet_passthru_req_done(struct request *rq,
@@ -175,7 +175,7 @@ static void nvmet_passthru_req_done(struct request *rq,
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
@@ -230,7 +230,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
if (unlikely(!ns)) {
pr_err("failed to get passthru ns nsid:%u\n", nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
- goto fail_out;
+ goto out;
}
q = ns->queue;
@@ -238,16 +238,15 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(rq)) {
- rq = NULL;
status = NVME_SC_INTERNAL;
- goto fail_out;
+ goto out_put_ns;
}
if (req->sg_cnt) {
ret = nvmet_passthru_map_sg(req, rq);
if (unlikely(ret)) {
status = NVME_SC_INTERNAL;
- goto fail_out;
+ goto out_put_req;
}
}
@@ -274,11 +273,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
return;
-fail_out:
+out_put_req:
+ blk_mq_free_request(rq);
+out_put_ns:
if (ns)
nvme_put_ns(ns);
+out:
nvmet_req_complete(req, status);
- blk_put_request(rq);
}
/*
@@ -326,6 +327,10 @@ static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
switch (req->cmd->common.opcode) {
case nvme_cmd_resv_register:
case nvme_cmd_resv_report:
@@ -396,6 +401,10 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
/*
* Passthru all vendor specific commands
*/
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3ccb59260b4a..ae6620489457 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1758,7 +1758,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
schedule_delayed_work(&port->repair_work, 0);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1769,7 +1769,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_REJECTED:
pr_debug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 590493e04b01..da4f7341323f 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -128,15 +128,29 @@ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
* PCI bus specific translator
*/
+static bool of_node_is_pcie(struct device_node *np)
+{
+ bool is_pcie = of_node_name_eq(np, "pcie");
+
+ if (is_pcie)
+ pr_warn_once("%pOF: Missing device_type\n", np);
+
+ return is_pcie;
+}
+
static int of_bus_pci_match(struct device_node *np)
{
/*
* "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
+ *
+ * If none of the device_type values match but the node name is
+ * "pcie", accept the device as PCI (with a warning).
*/
return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
- of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
+ of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
+ of_node_is_pcie(np);
}
static void of_bus_pci_count_cells(struct device_node *np,
@@ -985,6 +999,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
/* Don't error out as we'd break some existing DTs */
continue;
}
+ if (range.cpu_addr == OF_BAD_ADDR) {
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ range.bus_addr, node);
+ continue;
+ }
dma_offset = range.cpu_addr - range.bus_addr;
/* Take lower and upper limits */
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9d7fb45b1786..9668ea04cc80 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -893,8 +893,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* have OPP table for the device, while others don't and
* opp_set_rate() just needs to behave like clk_set_rate().
*/
- if (!_get_opp_count(opp_table))
- return 0;
+ if (!_get_opp_count(opp_table)) {
+ ret = 0;
+ goto put_opp_table;
+ }
if (!opp_table->required_opp_tables && !opp_table->regulators &&
!opp_table->paths) {
@@ -905,7 +907,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
ret = _set_opp_bw(opp_table, NULL, dev, true);
if (ret)
- return ret;
+ goto put_opp_table;
if (opp_table->regulator_enabled) {
regulator_disable(opp_table->regulators[0]);
@@ -932,10 +934,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Return early if nothing to do */
if (old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
+ if (!opp_table->required_opp_tables && !opp_table->regulators &&
+ !opp_table->paths) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ ret = 0;
+ goto put_opp_table;
+ }
}
/*
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 5368452eb5a6..d4314fba0269 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
+ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index f28d6a3c5a68..4547ac44c8d4 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -260,7 +260,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
- /* fall through */
+ fallthrough;
default:
/* Terminate from all other modes. */
@@ -598,7 +598,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
- /* fall through */
+ fallthrough;
case IEEE1284_MODE_COMPAT:
pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
@@ -702,7 +702,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
return -EIO;
}
- /* fall through - to NIBBLE */
+ fallthrough; /* to NIBBLE */
case IEEE1284_MODE_NIBBLE:
pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 77e37e3cb3a0..eda4ded4d5e5 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1647,7 +1647,7 @@ static int parport_ECP_supported(struct parport *pb)
break;
default:
pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
- /* Fall through - Assume 1 */
+ fallthrough; /* Assume 1 */
case 1:
pword = 1;
}
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 90df28c7cb0c..5fef2613b223 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -439,7 +439,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP:
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -642,7 +642,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
- /* FALLTHROUGH */
+ fallthrough;
default:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -1105,7 +1105,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "pcie_aux clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_aux);
}
- /* fall through */
+ fallthrough;
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx6_pcie->controller_id = 1;
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index c9530038ca9a..afde4aa8f6dc 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -223,7 +223,7 @@ static void rcar_pci_setup(struct rcar_pci_priv *priv)
pr_warn("unknown window size %ld - defaulting to 256M\n",
window_size);
window_size = SZ_256M;
- /* fall-through */
+ fallthrough;
case SZ_256M:
val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
break;
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index 5c93aa14f0de..ae9acc77d14f 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -1941,7 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus)
break;
case PCI_HEADER_TYPE_BRIDGE:
function = 0x8;
- /* fall through */
+ fallthrough;
case PCI_HEADER_TYPE_MULTIBRIDGE:
/* We assume here that only 1 bus behind the bridge
TO DO: add functionality for several:
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 6503d15effbb..9f85815b4f53 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -236,7 +236,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
switch (ctrl->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&ctrl->button_work);
- /* fall through */
+ fallthrough;
case ON_STATE:
ctrl->state = POWEROFF_STATE;
mutex_unlock(&ctrl->state_lock);
@@ -265,7 +265,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
switch (ctrl->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&ctrl->button_work);
- /* fall through */
+ fallthrough;
case OFF_STATE:
ctrl->state = POWERON_STATE;
mutex_unlock(&ctrl->state_lock);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index b59f84918fe0..c9e790c74051 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -83,21 +83,19 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
struct pci_dev *pdev;
- struct zpci_bus *zbus = zdev->zbus;
int rc;
if (!zpci_fn_configured(zdev->state))
return -EIO;
- pdev = pci_get_slot(zbus->bus, zdev->devfn);
- if (pdev) {
- if (pci_num_vf(pdev))
- return -EBUSY;
-
- pci_stop_and_remove_bus_device_locked(pdev);
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ if (pdev && pci_num_vf(pdev)) {
pci_dev_put(pdev);
+ return -EBUSY;
}
+ zpci_remove_device(zdev);
+
rc = zpci_disable_device(zdev);
if (rc)
return rc;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index afdc52d1cae7..65502e3f7b4f 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -642,7 +642,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&p_slot->work);
- /* fall through */
+ fallthrough;
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
@@ -678,7 +678,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
- /* fall through */
+ fallthrough;
case STATIC_STATE:
p_slot->state = POWEROFF_STATE;
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 64ebed129dbf..f357f9a32b3a 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -556,13 +556,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
return -1;
for (i = 0; i < num_clients; i++) {
- if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
- clients[i]->dma_ops == &dma_virt_ops) {
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (clients[i]->dma_ops == &dma_virt_ops) {
if (verbose)
dev_warn(clients[i],
"cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
return -1;
}
+#endif
pci_client = find_parent_pci_dev(clients[i]);
if (!pci_client) {
@@ -842,9 +843,10 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
* this should never happen because it will be prevented
* by the check in pci_p2pdma_distance_many()
*/
- if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
- dev->dma_ops == &dma_virt_ops))
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
return 0;
+#endif
for_each_sg(sg, s, nents, i) {
paddr = sg_phys(s);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a458c46d7e39..e39c5499770f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1049,7 +1049,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = true;
- /* Fall-through - force to D0 */
+ fallthrough; /* force to D0 */
default:
pmcsr = 0;
break;
@@ -2541,7 +2541,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
case PCI_D2:
if (pci_no_d1d2(dev))
break;
- /* else, fall through */
+ fallthrough;
default:
target_state = state;
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index bd2b691fa7a3..d35186b01d98 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,7 +231,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
- /* fall through */
+ fallthrough;
default:
ret = -EINVAL;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bdf9b52567e0..2a589b6d6ed8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1730,7 +1730,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
case PCI_DEVICE_ID_JMICRON_JMB366:
/* Redirect IDE second PATA port to the right spot */
conf5 |= (1 << 24);
- /* Fall through */
+ fallthrough;
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
case PCI_DEVICE_ID_JMICRON_JMB369:
@@ -2224,7 +2224,7 @@ static void quirk_netmos(struct pci_dev *dev)
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
- /* else, fall through */
+ fallthrough;
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 3951e02b7ded..2ce636937c6e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1253,7 +1253,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
additional_mmio_size = pci_hotplug_mmio_size;
additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
}
- /* Fall through */
+ fallthrough;
default:
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index fab267e359e7..c0e85be598c1 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1096,7 +1096,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateClosed:
if (xdev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 590e594092f2..a7c7c7cd2326 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -255,10 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
switch (state->Vcc) {
case 50:
++v;
- /* fall through */
+ fallthrough;
case 33:
++v;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
@@ -269,11 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
switch (state->Vpp) {
case 12:
++p;
- /* fall through */
+ fallthrough;
case 33:
case 50:
++p;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 7b7d23f25713..a0a71c1df042 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1404,7 +1404,7 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
break;
case CCN_TYPE_SBAS:
ccn->sbas_present = 1;
- /* Fall-through */
+ fallthrough;
default:
component = &ccn->node[id];
break;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index e51ddb6d63ed..cc00915ad6d1 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1002,7 +1002,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
default:
dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
fld);
- /* Fallthrough */
+ fallthrough;
case 8:
spe_pmu->min_period = 4096;
}
@@ -1021,7 +1021,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
default:
dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
fld);
- /* Fallthrough */
+ fallthrough;
case 2:
spe_pmu->counter_sz = 12;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 61054272a7c8..327df1a99f77 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -53,7 +53,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy,
case PHY_MODE_USB_OTG:
case PHY_MODE_USB_HOST:
val |= ULPI_INT_IDGRD;
- /* fall through */
+ fallthrough;
case PHY_MODE_USB_DEVICE:
val |= ULPI_INT_SESS_VALID;
default:
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index a84e9f027fc4..46ebdb1460a3 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -546,7 +546,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
rport->state = OTG_STATE_B_IDLE;
if (!vbus_attach)
rockchip_usb2phy_power_off(rport->phy);
- /* fall through */
+ fallthrough;
case OTG_STATE_B_IDLE:
if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) {
dev_dbg(&rport->phy->dev, "usb otg host connect\n");
@@ -754,11 +754,11 @@ static void rockchip_chg_detect_work(struct work_struct *work)
rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
else
rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
- /* fall through */
+ fallthrough;
case USB_CHG_STATE_SECONDARY_DONE:
rphy->chg_state = USB_CHG_STATE_DETECTED;
delay = 0;
- /* fall through */
+ fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
@@ -835,7 +835,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
dev_dbg(&rport->phy->dev, "FS/LS online\n");
break;
}
- /* fall through */
+ fallthrough;
case PHY_STATE_CONNECT:
if (rport->suspended) {
dev_dbg(&rport->phy->dev, "Connected\n");
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cf072153bdc5..a056031dee81 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -218,6 +218,7 @@ config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
depends on CROS_USBPD_NOTIFY
+ depends on USB_ROLE_SWITCH
default MFD_CROS_EC_DEV
help
If you say Y here, you get support for accessing Type C connector
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index ecfada00e6c5..272c89837d74 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -242,6 +242,25 @@ static ssize_t cros_ec_pdinfo_read(struct file *file,
read_buf, p - read_buf);
}
+static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
+{
+ struct {
+ struct cros_ec_command cmd;
+ struct ec_response_uptime_info resp;
+ } __packed msg = {};
+ int ret;
+
+ msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
+ msg.cmd.insize = sizeof(msg.resp);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+ if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
+ return false;
+
+ /* Other errors may be transient, so do not rule out support. */
+ return true;
+}
+
static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -444,8 +463,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
&cros_ec_pdinfo_fops);
- debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
- &cros_ec_uptime_fops);
+ if (cros_ec_uptime_is_supported(ec->ec_dev))
+ debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
+ &cros_ec_uptime_fops);
debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
&ec->ec_dev->last_resume_result);
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index ed794a7ddba9..81364029af36 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -681,8 +681,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
/* Register croc_ec_dev mfd */
rv = cros_ec_dev_init(client_data);
- if (rv)
+ if (rv) {
+ down_write(&init_lock);
goto end_cros_ec_dev_init_error;
+ }
return 0;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 3e745e0fe092..8d52b3b4bd4e 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -208,6 +208,12 @@ static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
msg->insize = sizeof(*r);
ret = send_command(ec_dev, msg);
+ if (ret >= 0) {
+ if (msg->result == EC_RES_INVALID_COMMAND)
+ return -EOPNOTSUPP;
+ if (msg->result != EC_RES_SUCCESS)
+ return -EPROTO;
+ }
if (ret > 0) {
r = (struct ec_response_host_event_mask *)msg->data;
*mask = r->mask;
@@ -469,14 +475,33 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
&ver_mask);
ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
- /*
- * Get host event wake mask, assume all events are wake events
- * if unavailable.
- */
+ /* Get host event wake mask. */
ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
&ec_dev->host_event_wake_mask);
- if (ret < 0)
- ec_dev->host_event_wake_mask = U32_MAX;
+ if (ret < 0) {
+ /*
+ * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
+ * use a reasonable default. Note that we ignore various
+ * battery, AC status, and power-state events, because (a)
+ * those can be quite common (e.g., when sitting at full
+ * charge, on AC) and (b) these are not actionable wake events;
+ * if anything, we'd like to continue suspending (to save
+ * power), not wake up.
+ */
+ ec_dev->host_event_wake_mask = U32_MAX &
+ ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
+ BIT(EC_HOST_EVENT_BATTERY_LOW) |
+ BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
+ BIT(EC_HOST_EVENT_PD_MCU) |
+ BIT(EC_HOST_EVENT_BATTERY_STATUS));
+ /*
+ * Old ECs may not support this command. Complain about all
+ * other errors.
+ */
+ if (ret != -EOPNOTSUPP)
+ dev_err(ec_dev->dev,
+ "failed to retrieve wake mask: %d\n", ret);
+ }
ret = 0;
@@ -496,8 +521,8 @@ EXPORT_SYMBOL(cros_ec_query_all);
*
* Return: 0 on success or negative error code.
*/
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
int ret;
@@ -541,7 +566,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
-EXPORT_SYMBOL(cros_ec_cmd_xfer);
/**
* cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 7e8629e3db74..30d0ba3b8889 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -38,6 +38,9 @@ struct cros_ec_rpmsg_response {
* @rpdev: rpmsg device we are connected to
* @xfer_ack: completion for host command transfer.
* @host_event_work: Work struct for pending host event.
+ * @ept: The rpmsg endpoint of this channel.
+ * @has_pending_host_event: Boolean used to check if there is a pending event.
+ * @probe_done: Flag to indicate that probe is done.
*/
struct cros_ec_rpmsg {
struct rpmsg_device *rpdev;
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 24e48d96ed76..8921f24e83ba 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -419,9 +419,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
* Disable filtering since we might add more jitter
* if b is in a random point in time.
*/
- new_timestamp = fifo_timestamp -
- fifo_info->timestamp * 1000 +
- in->timestamp * 1000;
+ new_timestamp = c - b * 1000 + a * 1000;
/*
* The timestamp can be stale if we had to use the fifo
* info timestamp.
@@ -675,29 +673,22 @@ done_with_this_batch:
* cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
* add to ringbuffer (legacy).
*
- * Note: This assumes we're running old firmware, where every sample's timestamp
- * is after the sample. Run if tight_timestamps == false.
- *
- * If there is a sample with a proper timestamp
+ * Note: This assumes we're running old firmware, where the timestamp
+ * is inserted after its sample(s). There can be several samples between
+ * timestamps, so several samples can have the same timestamp.
*
* timestamp | count
* -----------------
- * older_unprocess_out --> TS1 | 1
- * TS1 | 2
- * out --> TS1 | 3
- * next_out --> TS2 |
- *
- * We spread time for the samples [older_unprocess_out .. out]
- * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ * 1st sample --> TS1 | 1
+ * TS2 | 2
+ * TS2 | 3
+ * TS3 | 4
+ * last_out -->
*
- * If we reach the end of the samples, we compare with the
- * current timestamp:
*
- * older_unprocess_out --> TS1 | 1
- * TS1 | 2
- * out --> TS1 | 3
+ * We spread the samples using the period p = (current_timestamp - TS1)/count.
+ * With count == 4 above, that yields [TS1+p, TS1+2p, TS1+3p, current_timestamp].
*
- * We know have [TS1+1/3, TS1+2/3, current timestamp]
*/
static void
cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
@@ -710,58 +701,37 @@ cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
int i;
for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
- s64 older_timestamp;
s64 timestamp;
- struct cros_ec_sensors_ring_sample *older_unprocess_out =
- sensorhub->ring;
- struct cros_ec_sensors_ring_sample *next_out;
- int count = 1;
-
- for (out = sensorhub->ring; out < last_out; out = next_out) {
- s64 time_period;
+ int count = 0;
+ s64 time_period;
- next_out = out + 1;
+ for (out = sensorhub->ring; out < last_out; out++) {
if (out->sensor_id != i)
continue;
/* Timestamp to start with */
- older_timestamp = out->timestamp;
-
- /* Find next sample. */
- while (next_out < last_out && next_out->sensor_id != i)
- next_out++;
+ timestamp = out->timestamp;
+ out++;
+ count = 1;
+ break;
+ }
+ for (; out < last_out; out++) {
+ /* Find last sample. */
+ if (out->sensor_id != i)
+ continue;
+ count++;
+ }
+ if (count == 0)
+ continue;
- if (next_out >= last_out) {
- timestamp = current_timestamp;
- } else {
- timestamp = next_out->timestamp;
- if (timestamp == older_timestamp) {
- count++;
- continue;
- }
- }
+ /* Spread uniformly between the first and last samples. */
+ time_period = div_s64(current_timestamp - timestamp, count);
- /*
- * The next sample has a new timestamp, spread the
- * unprocessed samples.
- */
- if (next_out < last_out)
- count++;
- time_period = div_s64(timestamp - older_timestamp,
- count);
-
- for (; older_unprocess_out <= out;
- older_unprocess_out++) {
- if (older_unprocess_out->sensor_id != i)
- continue;
- older_timestamp += time_period;
- older_unprocess_out->timestamp =
- older_timestamp;
- }
- count = 1;
- /* The next_out sample has a valid timestamp, skip. */
- next_out++;
- older_unprocess_out = next_out;
+ for (out = sensorhub->ring; out < last_out; out++) {
+ if (out->sensor_id != i)
+ continue;
+ timestamp += time_period;
+ out->timestamp = timestamp;
}
}
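
With the rewrite above, the legacy path reduces to: count the samples for a sensor, derive one period from the span between the first sample's timestamp and the current timestamp, then assign multiples of that period. A toy model of the arithmetic (plain userspace C with made-up numbers, not driver code):

/* Hedged sketch: model the legacy timestamp spreading with toy values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ts1 = 1000000;		/* timestamp of the first matching sample */
	int64_t current_ts = 1004000;	/* current timestamp when the batch is processed */
	int count = 4;			/* samples found for this sensor */
	int64_t period = (current_ts - ts1) / count;	/* div_s64() in the driver */
	int64_t ts = ts1;
	int k;

	for (k = 0; k < count; k++) {
		ts += period;
		printf("sample %d -> %lld\n", k, (long long)ts);
	}
	/* Prints 1001000, 1002000, 1003000, 1004000: evenly spread, ending at current_ts. */
	return 0;
}

The last sample always lands on current_timestamp, matching the [TS1+p, TS1+2p, TS1+3p, current_timestamp] spread described in the comment above.
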
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index d09260382550..dfa1f816a45f 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -148,6 +148,10 @@ static int terminate_request(struct cros_ec_device *ec_dev)
* receive_n_bytes - receive n bytes from the EC.
*
* Assumes buf is a pointer into the ec_dev->din buffer
+ *
+ * @ec_dev: ChromeOS EC device.
+ * @buf: Pointer to the buffer receiving the data.
+ * @n: Number of bytes received.
*/
static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n)
{
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 66b8d21092af..3fcd27ec9ad8 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -14,9 +14,21 @@
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/role.h>
#define DRV_NAME "cros-ec-typec"
+/* Supported alt modes. */
+enum {
+ CROS_EC_ALTMODE_DP = 0,
+ CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_MAX,
+};
+
/* Per port data. */
struct cros_typec_port {
struct typec_port *port;
@@ -25,6 +37,16 @@ struct cros_typec_port {
struct typec_partner *partner;
/* Port partner PD identity info. */
struct usb_pd_identity p_identity;
+ struct typec_switch *ori_sw;
+ struct typec_mux *mux;
+ struct usb_role_switch *role_sw;
+
+ /* Variables keeping track of switch state. */
+ struct typec_mux_state state;
+ uint8_t mux_flags;
+
+ /* Port alt modes. */
+ struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -32,10 +54,11 @@ struct cros_typec_data {
struct device *dev;
struct cros_ec_device *ec;
int num_ports;
- unsigned int cmd_ver;
+ unsigned int pd_ctrl_ver;
/* Array of ports, indexed by port number. */
struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block nb;
+ struct work_struct port_work;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -84,6 +107,81 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
return 0;
}
+static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ port->mux = fwnode_typec_mux_get(fwnode, NULL);
+ if (IS_ERR(port->mux)) {
+ dev_dbg(dev, "Mux handle not found.\n");
+ goto mux_err;
+ }
+
+ port->ori_sw = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(port->ori_sw)) {
+ dev_dbg(dev, "Orientation switch handle not found.\n");
+ goto ori_sw_err;
+ }
+
+ port->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(port->role_sw)) {
+ dev_dbg(dev, "USB role switch handle not found.\n");
+ goto role_sw_err;
+ }
+
+ return 0;
+
+role_sw_err:
+ usb_role_switch_put(port->role_sw);
+ori_sw_err:
+ typec_switch_put(port->ori_sw);
+mux_err:
+ typec_mux_put(port->mux);
+
+ return -ENODEV;
+}
+
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+ bool pd_en)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_partner_desc p_desc = {
+ .usb_pd = pd_en,
+ };
+ int ret = 0;
+
+ /*
+ * Fill an initial PD identity, which will then be updated with info
+ * from the EC.
+ */
+ p_desc.identity = &port->p_identity;
+
+ port->partner = typec_register_partner(port->port, &p_desc);
+ if (IS_ERR(port->partner)) {
+ ret = PTR_ERR(port->partner);
+ port->partner = NULL;
+ }
+
+ return ret;
+}
+
+static void cros_typec_remove_partner(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+
+ usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
+ typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+ typec_mux_set(port->mux, &port->state);
+
+ typec_unregister_partner(port->partner);
+ port->partner = NULL;
+}
+
static void cros_unregister_ports(struct cros_typec_data *typec)
{
int i;
@@ -91,10 +189,40 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
for (i = 0; i < typec->num_ports; i++) {
if (!typec->ports[i])
continue;
+ cros_typec_remove_partner(typec, i);
+ usb_role_switch_put(typec->ports[i]->role_sw);
+ typec_switch_put(typec->ports[i]->ori_sw);
+ typec_mux_put(typec->ports[i]->mux);
typec_unregister_port(typec->ports[i]->port);
}
}
+/*
+ * Fake the alt mode structs until we actually start registering Type C port
+ * and partner alt modes.
+ */
+static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ /* All PD capable CrOS devices are assumed to support DP altmode. */
+ port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
+ port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+
+ /*
+ * Register TBT compatibility alt mode. The EC will not enter the mode
+ * if it doesn't support it, so it's safe to register it unconditionally
+ * here for now.
+ */
+ port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
+ port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+}
+
static int cros_typec_init_ports(struct cros_typec_data *typec)
{
struct device *dev = typec->dev;
@@ -153,6 +281,13 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
ret = PTR_ERR(cros_port->port);
goto unregister_ports;
}
+
+ ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
+ if (ret)
+ dev_dbg(dev, "No switch control for port %d\n",
+ port_num);
+
+ cros_typec_register_port_altmodes(typec, port_num);
}
return 0;
@@ -193,30 +328,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
-static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
- bool pd_en)
-{
- struct cros_typec_port *port = typec->ports[port_num];
- struct typec_partner_desc p_desc = {
- .usb_pd = pd_en,
- };
- int ret = 0;
-
- /*
- * Fill an initial PD identity, which will then be updated with info
- * from the EC.
- */
- p_desc.identity = &port->p_identity;
-
- port->partner = typec_register_partner(port->port, &p_desc);
- if (IS_ERR(port->partner)) {
- ret = PTR_ERR(port->partner);
- port->partner = NULL;
- }
-
- return ret;
-}
-
static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control *resp)
{
@@ -270,16 +381,166 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
} else {
if (!typec->ports[port_num]->partner)
return;
+ cros_typec_remove_partner(typec, port_num);
+ }
+}
- typec_unregister_partner(typec->ports[port_num]->partner);
- typec->ports[port_num]->partner = NULL;
+static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
+ struct ec_response_usb_pd_mux_info *resp)
+{
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
+
+ return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
+ sizeof(req), resp, sizeof(*resp));
+}
+
+static int cros_typec_usb_safe_state(struct cros_typec_port *port)
+{
+ port->state.mode = TYPEC_STATE_SAFE;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+/*
+ * Spoof the VDOs that were likely communicated by the partner for TBT alt
+ * mode.
+ */
+static int cros_typec_enable_tbt(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_thunderbolt_data data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ /* Device Discover Mode VDO */
+ data.device_mode = TBT_MODE;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER)
+ data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3);
+
+ /* Cable Discover Mode VDO */
+ data.cable_mode = TBT_MODE;
+ data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+ data.cable_mode |= TBT_CABLE_OPTICAL;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
+ data.cable_mode |= TBT_CABLE_LINK_TRAINING;
+
+ if (pd_ctrl->cable_gen)
+ data.cable_mode |= TBT_CABLE_ROUNDED;
+
+ /* Enter Mode VDO */
+ data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+ if (!port->state.alt) {
+ port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
+ }
+
+ port->state.data = &data;
+ port->state.mode = TYPEC_TBT_MODE;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+/* Spoof the VDOs that were likely communicated by the partner. */
+static int cros_typec_enable_dp(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_displayport_data dp_data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ /* Status VDO. */
+ dp_data.status = DP_STATUS_ENABLED;
+ if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
+ dp_data.status |= DP_STATUS_IRQ_HPD;
+ if (port->mux_flags & USB_PD_MUX_HPD_LVL)
+ dp_data.status |= DP_STATUS_HPD_STATE;
+
+ /* Configuration VDO. */
+ dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
+ if (!port->state.alt) {
+ port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
}
+
+ port->state.data = &dp_data;
+ port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
+ uint8_t mux_flags,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ enum typec_orientation orientation;
+ int ret;
+
+ if (!port->partner)
+ return 0;
+
+ if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+ orientation = TYPEC_ORIENTATION_REVERSE;
+ else
+ orientation = TYPEC_ORIENTATION_NORMAL;
+
+ ret = typec_switch_set(port->ori_sw, orientation);
+ if (ret)
+ return ret;
+
+ if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+ ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
+ } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+ ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
+ } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+ ret = cros_typec_usb_safe_state(port);
+ } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ ret = typec_mux_set(port->mux, &port->state);
+ } else {
+ dev_info(typec->dev,
+ "Unsupported mode requested, mux flags: %x\n",
+ mux_flags);
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
}
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
struct ec_params_usb_pd_control req;
- struct ec_response_usb_pd_control_v1 resp;
+ struct ec_response_usb_pd_control_v2 resp;
+ struct ec_response_usb_pd_mux_info mux_resp;
int ret;
if (port_num < 0 || port_num >= typec->num_ports) {
@@ -293,7 +554,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
req.swap = USB_PD_CTRL_SWAP_NONE;
- ret = cros_typec_ec_command(typec, typec->cmd_ver,
+ ret = cros_typec_ec_command(typec, typec->pd_ctrl_ver,
EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
&resp, sizeof(resp));
if (ret < 0)
@@ -304,13 +565,33 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
- if (typec->cmd_ver == 1)
- cros_typec_set_port_params_v1(typec, port_num, &resp);
+ if (typec->pd_ctrl_ver != 0)
+ cros_typec_set_port_params_v1(typec, port_num,
+ (struct ec_response_usb_pd_control_v1 *)&resp);
else
cros_typec_set_port_params_v0(typec, port_num,
(struct ec_response_usb_pd_control *) &resp);
- return 0;
+	/* Update the switches, if they exist, according to the requested state. */
+ ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
+ if (ret < 0) {
+ dev_warn(typec->dev,
+ "Failed to get mux info for port: %d, err = %d\n",
+ port_num, ret);
+ return 0;
+ }
+
+ /* No change needs to be made, let's exit early. */
+ if (typec->ports[port_num]->mux_flags == mux_resp.flags)
+ return 0;
+
+ typec->ports[port_num]->mux_flags = mux_resp.flags;
+ ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
+ if (ret)
+ dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
+ return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+ !!(resp.role & PD_CTRL_RESP_ROLE_DATA));
}
static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -327,22 +608,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
if (ret < 0)
return ret;
- if (resp.version_mask & EC_VER_MASK(1))
- typec->cmd_ver = 1;
+ if (resp.version_mask & EC_VER_MASK(2))
+ typec->pd_ctrl_ver = 2;
+ else if (resp.version_mask & EC_VER_MASK(1))
+ typec->pd_ctrl_ver = 1;
else
- typec->cmd_ver = 0;
+ typec->pd_ctrl_ver = 0;
dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n",
- typec->cmd_ver);
+ typec->pd_ctrl_ver);
return 0;
}
-static int cros_ec_typec_event(struct notifier_block *nb,
- unsigned long host_event, void *_notify)
+static void cros_typec_port_work(struct work_struct *work)
{
- struct cros_typec_data *typec = container_of(nb, struct cros_typec_data,
- nb);
+ struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
int ret, i;
for (i = 0; i < typec->num_ports; i++) {
@@ -350,6 +631,14 @@ static int cros_ec_typec_event(struct notifier_block *nb,
if (ret < 0)
dev_warn(typec->dev, "Update failed for port: %d\n", i);
}
+}
+
+static int cros_ec_typec_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);
+
+ schedule_work(&typec->port_work);
return NOTIFY_OK;
}
@@ -408,6 +697,12 @@ static int cros_typec_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ INIT_WORK(&typec->port_work, cros_typec_port_work);
+
+ /*
+ * Safe to call port update here, since we haven't registered the
+ * PD notifier yet.
+ */
for (i = 0; i < typec->num_ports; i++) {
ret = cros_typec_port_update(typec, i);
if (ret < 0)
@@ -426,11 +721,35 @@ unregister_ports:
return ret;
}
+static int __maybe_unused cros_typec_suspend(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ cancel_work_sync(&typec->port_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_typec_resume(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ /* Refresh port state. */
+ schedule_work(&typec->port_work);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cros_typec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume)
+};
+
static struct platform_driver cros_typec_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
.of_match_table = of_match_ptr(cros_typec_of_match),
+ .pm = &cros_typec_pm_ops,
},
.probe = cros_typec_probe,
};
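A small decision-table sketch of the mux-flag precedence cros_typec_configure_mux() applies above (TBT compatibility over DP over safe state over plain USB); the flag constants below are placeholders, not the EC ABI:

enum cros_mux_mode { MODE_TBT, MODE_DP, MODE_SAFE, MODE_USB, MODE_UNSUPPORTED };

#define FLAG_TBT_COMPAT	0x01	/* placeholder values */
#define FLAG_DP		0x02
#define FLAG_SAFE	0x04
#define FLAG_USB	0x08

static enum cros_mux_mode pick_mux_mode(unsigned int mux_flags)
{
	if (mux_flags & FLAG_TBT_COMPAT)
		return MODE_TBT;	/* TBT compatibility wins over DP */
	if (mux_flags & FLAG_DP)
		return MODE_DP;
	if (mux_flags & FLAG_SAFE)
		return MODE_SAFE;
	if (mux_flags & FLAG_USB)
		return MODE_USB;
	return MODE_UNSUPPORTED;	/* driver logs this and returns -ENOTSUPP */
}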
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 5739a9669b29..bbc4e71a16ff 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -625,7 +625,10 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
vdev_id = VIRTIO_ID_NET;
hdr_len = sizeof(struct virtio_net_hdr);
config = &fifo->vdev[vdev_id]->config.net;
- if (ntohs(hdr.len) > config->mtu +
+ /* A legacy-only interface for now. */
+ if (ntohs(hdr.len) >
+ __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ config->mtu) +
MLXBF_TMFIFO_NET_L2_OVERHEAD)
return;
} else {
@@ -1231,8 +1234,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
/* Create the network vdev. */
memset(&net_config, 0, sizeof(net_config));
- net_config.mtu = ETH_DATA_LEN;
- net_config.status = VIRTIO_NET_S_LINK_UP;
+
+ /* A legacy-only interface for now. */
+ net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ ETH_DATA_LEN);
+ net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ VIRTIO_NET_S_LINK_UP);
mlxbf_tmfifo_get_cfg_mac(net_config.mac);
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
MLXBF_TMFIFO_NET_FEATURES, &net_config,
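A simplified userspace reimplementation of the legacy-virtio byte-order conversion used above, for illustration only; the kernel's __cpu_to_virtio16()/__virtio16_to_cpu() helpers are the real interface:

#include <stdbool.h>
#include <stdint.h>

static uint16_t cpu_to_virtio16_legacy(bool device_is_little_endian,
				       uint16_t val)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };
	bool cpu_is_little_endian = probe.b[0] == 1;

	if (device_is_little_endian == cpu_is_little_endian)
		return val;				/* byte orders match: no-op */
	return (uint16_t)((val << 8) | (val >> 8));	/* swap bytes */
}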
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 5e1d14e35f20..0d46706afd2d 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -431,7 +431,7 @@ static void olpc_xo175_ec_complete(void *arg)
input_sync(priv->pwrbtn);
input_report_key(priv->pwrbtn, KEY_POWER, 0);
input_sync(priv->pwrbtn);
- /* fall through */
+ fallthrough;
case EVENT_POWER_PRESS_WAKE:
case EVENT_TIMED_HOST_WAKE:
pm_wakeup_event(priv->pwrbtn->dev.parent,
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 60c18f21588d..49f4b73be513 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1001,7 +1001,7 @@ static acpi_status WMID_get_u32(u32 *value, u32 cap)
*value = tmp & 0x1;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return AE_ERROR;
}
@@ -1328,7 +1328,7 @@ static acpi_status get_u32(u32 *value, u32 cap)
status = AMW0_get_u32(value, cap);
break;
}
- /* fall through */
+ fallthrough;
case ACER_WMID:
status = WMID_get_u32(value, cap);
break;
@@ -1371,7 +1371,7 @@ static acpi_status set_u32(u32 value, u32 cap)
return AMW0_set_u32(value, cap);
}
- /* fall through */
+ fallthrough;
case ACER_WMID:
return WMID_set_u32(value, cap);
case ACER_WMID_v2:
@@ -1381,7 +1381,7 @@ static acpi_status set_u32(u32 value, u32 cap)
return wmid_v2_set_u32(value, cap);
else if (wmi_has_guid(WMID_GUID2))
return WMID_set_u32(value, cap);
- /* fall through */
+ fallthrough;
default:
return AE_BAD_PARAMETER;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 5e9c2296931c..70edc5bb3a14 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1587,10 +1587,10 @@ static ssize_t kbd_led_timeout_store(struct device *dev,
switch (unit) {
case KBD_TIMEOUT_DAYS:
value *= 24;
- /* fall through */
+ fallthrough;
case KBD_TIMEOUT_HOURS:
value *= 60;
- /* fall through */
+ fallthrough;
case KBD_TIMEOUT_MINUTES:
value *= 60;
unit = KBD_TIMEOUT_SECONDS;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 90bc7969b199..8cf8c1be2666 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -186,7 +186,9 @@
#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
+#define MLXPLAT_CPLD_WD_CPBLTY_MASK (GENMASK(7, 0) & ~BIT(6))
#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
+#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600
#define MLXPLAT_CPLD_WD_MAX_DEVS 2
/* mlxplat_priv - platform private data
@@ -2084,6 +2086,84 @@ static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
},
};
+/* Watchdog type3: hardware implementation version 3.
+ * It can be present on all systems and is differentiated by the WD
+ * capability bit. Old systems (MSN2700, MSN2410, MSN2740, MSN2100 and
+ * MSN2140) still have only one main watchdog.
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type3[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type3[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type3,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type3),
+ .version = MLX_WDT_TYPE3,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type3,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type3),
+ .version = MLX_WDT_TYPE3,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -2114,8 +2194,10 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
@@ -2742,6 +2824,27 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
return 0;
}
+static int mlxplat_mlxcpld_check_wd_capability(void *regmap)
+{
+ u32 regval;
+ int i, rc;
+
+ rc = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ &regval);
+ if (rc)
+ return rc;
+
+ if (!(regval & ~MLXPLAT_CPLD_WD_CPBLTY_MASK)) {
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++) {
+ if (mlxplat_wd_data[i])
+ mlxplat_wd_data[i] =
+ &mlxplat_mlxcpld_wd_set_type3[i];
+ }
+ }
+
+ return 0;
+}
+
static int __init mlxplat_init(void)
{
struct mlxplat_priv *priv;
@@ -2874,6 +2977,9 @@ static int __init mlxplat_init(void)
}
/* Add WD drivers. */
+ err = mlxplat_mlxcpld_check_wd_capability(priv->regmap);
+ if (err)
+ goto fail_platform_wd_register;
for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
if (mlxplat_wd_data[j]) {
mlxplat_wd_data[j]->regmap = priv->regmap;
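A standalone sketch of the capability test in mlxplat_mlxcpld_check_wd_capability() above: MLXPLAT_CPLD_WD_CPBLTY_MASK clears bit 6 of the 8-bit register, so negating the mask isolates that bit and the type-3 tables are selected when it reads back as 0. Constants are re-derived here for illustration:

#include <stdbool.h>
#include <stdint.h>

#define WD_CPBLTY_MASK	(0xffu & ~(1u << 6))	/* GENMASK(7, 0) & ~BIT(6) */

static bool wd_is_type3(uint8_t regval)
{
	/* ~WD_CPBLTY_MASK keeps only bit 6 of the 8-bit register value */
	return (regval & (uint8_t)~WD_CPBLTY_MASK) == 0;
}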
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index ec515223f654..d8afed5db94c 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -84,28 +84,28 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
/* Power button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
key_code = KEY_POWER;
break;
/* Home button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
key_code = KEY_LEFTMETA;
break;
/* Volume up button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
key_code = KEY_VOLUMEUP;
break;
/* Volume down button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 4864a5c189d4..9c4df41687a3 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4060,7 +4060,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
* AC status changed; can be triggered by plugging or
* unplugging AC adapter, docking or undocking. */
- /* fallthrough */
+ fallthrough;
case TP_HKEY_EV_KEY_NUMLOCK:
case TP_HKEY_EV_KEY_FN:
@@ -4176,7 +4176,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = true;
break;
}
- /* fallthrough - to default */
+ fallthrough; /* to default */
default:
known_ev = false;
}
@@ -6266,7 +6266,7 @@ static int thermal_get_sensor(int idx, s32 *value)
idx -= 8;
}
#endif
- /* fallthrough */
+ fallthrough;
case TPACPI_THERMAL_TPEC_8:
if (idx <= 7) {
if (!acpi_ec_read(t + idx, &tmp))
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 36fff00af9eb..e557d757c647 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2748,7 +2748,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
if (result == TOS_SUCCESS)
pr_notice("Re-enabled hotkeys\n");
- /* Fall through */
+ fallthrough;
default:
retries--;
break;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 9469fe182d02..db65be026920 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -748,7 +748,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
USB_CH_IP_CUR_LVL_1P5;
break;
}
- /* else, fall through */
+ fallthrough;
case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
@@ -2410,7 +2410,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
* of 1sec for enabling charging
*/
msleep(1000);
- /* Intentional fall through */
+ fallthrough;
case AB8500_BM_USB_STATE_CONFIGURED:
/*
* USB is configured, enable charging with the charging
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 751c4f6c7487..7eec415c82a3 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1542,7 +1542,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_INITMEASURING);
- /* Intentional fallthrough */
+ fallthrough;
case AB8500_FG_DISCHARGE_INITMEASURING:
/*
* Discard a number of samples during startup.
@@ -1572,7 +1572,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_RECOVERY);
- /* Intentional fallthrough */
+ fallthrough;
case AB8500_FG_DISCHARGE_RECOVERY:
sleep_time = di->bm->fg_params->recovery_sleep_timer;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 2fb33a07879a..175c4f3d7955 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1419,7 +1419,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
abx500_chargalg_stop_charging(di);
di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
abx500_chargalg_state_to(di, STATE_HANDHELD);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_HANDHELD:
break;
@@ -1435,7 +1435,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
di->maintenance_chg = false;
abx500_chargalg_state_to(di, STATE_SUSPENDED);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_SUSPENDED:
/* CHARGING is suspended */
@@ -1444,7 +1444,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_BATT_REMOVED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_BATT_REMOVED:
if (!di->events.batt_rem)
@@ -1454,7 +1454,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_HW_TEMP_PROTECT_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_HW_TEMP_PROTECT:
if (!di->events.main_thermal_prot &&
@@ -1465,7 +1465,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_OVV_PROTECT_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_OVV_PROTECT:
if (!di->events.vbus_ovv &&
@@ -1479,7 +1479,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_CHG_NOT_OK_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_CHG_NOT_OK:
if (!di->events.mainextchnotok &&
@@ -1490,7 +1490,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_SAFETY_TIMER_EXPIRED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_SAFETY_TIMER_EXPIRED:
/* We exit this state when charger is removed */
@@ -1537,7 +1537,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_WAIT_FOR_RECHARGE_INIT:
abx500_chargalg_hold_charging(di);
abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_WAIT_FOR_RECHARGE:
if (di->batt_data.percent <=
@@ -1558,7 +1558,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
di->bm->batt_id].maint_a_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough*/
+ fallthrough;
case STATE_MAINTENANCE_A:
if (di->events.maintenance_timer_expired) {
@@ -1578,7 +1578,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
di->bm->batt_id].maint_b_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough*/
+ fallthrough;
case STATE_MAINTENANCE_B:
if (di->events.maintenance_timer_expired) {
@@ -1597,7 +1597,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_TEMP_LOWHIGH:
if (!di->events.btemp_lowhigh)
@@ -1607,7 +1607,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_WD_EXPIRED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_WD_EXPIRED:
if (!di->events.ac_wd_expired &&
@@ -1618,7 +1618,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_TEMP_UNDEROVER_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_TEMP_UNDEROVER:
if (!di->events.btemp_underover)
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index d01dc0332edc..0eaa86c52874 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -349,7 +349,7 @@ static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
case 100000:
if (power->axp20x_id == AXP221_ID)
return -EINVAL;
- /* fall through */
+ fallthrough;
case 500000:
case 900000:
val = (900000 - intval) / 400000;
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 2a45e84447fe..d89e08efd2ad 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -383,7 +383,7 @@ static int cros_usbpd_charger_get_prop(struct power_supply *psy,
*/
if (ec_device->mkbp_event_supported || port->psy_online)
break;
- /* fall through */
+ fallthrough;
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 5fca4960f440..8878f9131184 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -121,7 +121,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
dev_dbg(chip->dev, "Battery temperature is out of range\n");
- /* Fall through */
+ fallthrough;
case MAX8925_IRQ_VCHG_DC_OVP:
dev_dbg(chip->dev, "Error detection\n");
__set_charger(info, 0);
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 65832bc229f6..18b33f14dfee 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -665,7 +665,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "Failed to find USB phy: %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto err_bat_irq;
break;
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 26923af574f4..e05cee457471 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -227,7 +227,7 @@ static irqreturn_t wm8350_charger_handler(int irq, void *data)
case WM8350_IRQ_EXT_USB_FB:
case WM8350_IRQ_EXT_WALL_FB:
wm8350_charger_config(wm8350, policy);
- /* Fall through */
+ fallthrough;
case WM8350_IRQ_EXT_BAT_FB:
power_supply_changed(power->battery);
power_supply_changed(power->usb);
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 24f04ffdd986..9d66257e1da5 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -769,7 +769,7 @@ static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf)
switch (info->monitor_type) {
case PS3AV_MONITOR_TYPE_DVI:
dvi = PS3AV_MODE_DVI;
- /* fall through */
+ fallthrough;
case PS3AV_MONITOR_TYPE_HDMI:
id = ps3av_hdmi_get_id(info);
break;
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index f0e650cc866e..c22206652f06 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -693,11 +693,11 @@ void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport,
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_enable[3] = 1;
- /* fall through */
+ fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_enable[2] = 1;
audio->audio_enable[1] = 1;
- /* fall through */
+ fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_enable[0] = 1;
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 73aaae5574ed..e020faff7da5 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -142,16 +142,15 @@ static int idtcm_strverscmp(const char *ver1, const char *ver2)
return result;
}
-static int idtcm_xfer(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count,
- bool write)
+static int idtcm_xfer_read(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
{
struct i2c_client *client = idtcm->client;
struct i2c_msg msg[2];
int cnt;
- char *fmt = "i2c_transfer failed at %d in %s for %s, at addr: %04X!\n";
+ char *fmt = "i2c_transfer failed at %d in %s, at addr: %04X!\n";
msg[0].addr = client->addr;
msg[0].flags = 0;
@@ -159,7 +158,7 @@ static int idtcm_xfer(struct idtcm *idtcm,
msg[0].buf = &regaddr;
msg[1].addr = client->addr;
- msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].flags = I2C_M_RD;
msg[1].len = count;
msg[1].buf = buf;
@@ -170,7 +169,6 @@ static int idtcm_xfer(struct idtcm *idtcm,
fmt,
__LINE__,
__func__,
- write ? "write" : "read",
regaddr);
return cnt;
} else if (cnt != 2) {
@@ -182,6 +180,37 @@ static int idtcm_xfer(struct idtcm *idtcm,
return 0;
}
+static int idtcm_xfer_write(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idtcm->client;
+	/* Add 1 byte for the device register address. */
+ u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
+ int cnt;
+ char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n";
+
+ if (count > IDTCM_MAX_WRITE_COUNT)
+ return -EINVAL;
+
+ msg[0] = regaddr;
+ memcpy(&msg[1], buf, count);
+
+ cnt = i2c_master_send(client, msg, count + 1);
+
+ if (cnt < 0) {
+ dev_err(&client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ regaddr);
+ return cnt;
+ }
+
+ return 0;
+}
+
static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
{
u8 buf[4];
@@ -195,7 +224,7 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
buf[2] = 0x10;
buf[3] = 0x20;
- err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+ err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
if (err) {
idtcm->page_offset = 0xff;
@@ -223,11 +252,12 @@ static int _idtcm_rdwr(struct idtcm *idtcm,
err = idtcm_page_offset(idtcm, hi);
if (err)
- goto out;
+ return err;
- err = idtcm_xfer(idtcm, lo, buf, count, write);
-out:
- return err;
+ if (write)
+ return idtcm_xfer_write(idtcm, lo, buf, count);
+
+ return idtcm_xfer_read(idtcm, lo, buf, count);
}
static int idtcm_read(struct idtcm *idtcm,
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index ffae56c5d97f..82840d72364a 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -55,6 +55,8 @@
#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+#define IDTCM_MAX_WRITE_COUNT (512)
+
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
PLL_MODE_MIN = 0,
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 004b2ea9b5fd..276e939a5684 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -510,12 +510,12 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
last->period > s2.period &&
last->period <= state->period)
dev_warn(chip->dev,
- ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n",
+ ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n",
state->period, s2.period, last->period);
if (state->enabled && state->period < s2.period)
dev_warn(chip->dev,
- ".apply is supposed to round down period (requested: %u, applied: %u)\n",
+ ".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
state->period, s2.period);
if (state->enabled &&
@@ -524,14 +524,14 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
last->duty_cycle > s2.duty_cycle &&
last->duty_cycle <= state->duty_cycle)
dev_warn(chip->dev,
- ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n",
+ ".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period,
last->duty_cycle, last->period);
if (state->enabled && state->duty_cycle < s2.duty_cycle)
dev_warn(chip->dev,
- ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n",
+ ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period);
@@ -558,7 +558,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
(s1.enabled && s1.period != last->period) ||
(s1.enabled && s1.duty_cycle != last->duty_cycle)) {
dev_err(chip->dev,
- ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n",
+ ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n",
s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
last->enabled, last->polarity, last->duty_cycle,
last->period);
@@ -1284,8 +1284,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
if (state.enabled)
seq_puts(s, " enabled");
- seq_printf(s, " period: %u ns", state.period);
- seq_printf(s, " duty: %u ns", state.duty_cycle);
+ seq_printf(s, " period: %llu ns", state.period);
+ seq_printf(s, " duty: %llu ns", state.duty_cycle);
seq_printf(s, " polarity: %s",
state.polarity ? "inverse" : "normal");
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 1f829edd8ee7..79b1e58e946d 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
u64 tmp, multi, rate;
u32 value, prescale;
- rate = clk_get_rate(ip->clk);
-
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
else
state->polarity = PWM_POLARITY_INVERSED;
+ rate = clk_get_rate(ip->clk);
+ if (rate == 0) {
+ state->period = 0;
+ state->duty_cycle = 0;
+ return;
+ }
+
value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
prescale &= IPROC_PWM_PRESCALE_MAX;
@@ -143,8 +148,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
value = rate * state->duty_cycle;
duty = div64_u64(value, div);
- if (period < IPROC_PWM_PERIOD_MIN ||
- duty < IPROC_PWM_DUTY_CYCLE_MIN)
+ if (period < IPROC_PWM_PERIOD_MIN)
return -EINVAL;
if (period <= IPROC_PWM_PERIOD_MAX &&
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 81da91df2529..16c5898b934a 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -138,7 +138,7 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
dc = div64_u64(val, div);
/* If duty_ns or period_ns are not achievable then return */
- if (pc < PERIOD_COUNT_MIN || dc < DUTY_CYCLE_HIGH_MIN)
+ if (pc < PERIOD_COUNT_MIN)
return -EINVAL;
/* If pc and dc are in bounds, the calculation is done */
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 924d39a797cf..ba9500aca078 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -43,7 +43,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
{
/* Duty cycle 0..15 max */
- return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period);
+ return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period);
}
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 5f3d7f7e6aef..fcdf6befb838 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -124,7 +124,7 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
real_state->duty_cycle = state->duty_cycle;
tmp = (u64)p->mod * real_state->duty_cycle;
- p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period);
+ p->val = DIV64_U64_ROUND_CLOSEST(tmp, real_state->period);
real_state->polarity = state->polarity;
real_state->enabled = state->enabled;
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 732a6f3701e8..c50d453552bd 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -202,7 +202,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
sr = readl(imx->mmio_base + MX3_PWMSR);
fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr);
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
- period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
+ period_ms = DIV_ROUND_UP_ULL(pwm_get_period(pwm),
NSEC_PER_MSEC);
msleep(period_ms);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 674f0e238ba0..7d33e3646436 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -25,10 +25,10 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#define IQS620_PWR_SETTINGS 0xD2
+#define IQS620_PWR_SETTINGS 0xd2
#define IQS620_PWR_SETTINGS_PWM_OUT BIT(7)
-#define IQS620_PWM_DUTY_CYCLE 0xD8
+#define IQS620_PWM_DUTY_CYCLE 0xd8
#define IQS620_PWM_PERIOD_NS 1000000
@@ -46,7 +46,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct iqs620_pwm_private *iqs620_pwm;
struct iqs62x_core *iqs62x;
- int duty_scale, ret;
+ u64 duty_scale;
+ int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
return -ENOTSUPP;
@@ -69,7 +70,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* For lower duty cycles (e.g. 0), the PWM output is simply disabled to
* allow an external pull-down resistor to hold the GPIO3/LTX pin low.
*/
- duty_scale = state->duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
+ duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS);
mutex_lock(&iqs620_pwm->lock);
@@ -81,7 +82,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (duty_scale) {
- u8 duty_val = min(duty_scale - 1, 0xFF);
+ u8 duty_val = min_t(u64, duty_scale - 1, 0xff);
ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
duty_val);
@@ -93,7 +94,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled && duty_scale) {
ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
- IQS620_PWR_SETTINGS_PWM_OUT, 0xFF);
+ IQS620_PWR_SETTINGS_PWM_OUT, 0xff);
if (ret)
goto err_mutex;
}
@@ -159,7 +160,7 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
IQS620_PWR_SETTINGS_PWM_OUT,
- iqs620_pwm->out_en ? 0xFF : 0);
+ iqs620_pwm->out_en ? 0xff : 0);
err_mutex:
mutex_unlock(&iqs620_pwm->lock);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b94e0d09c300..ab001ce55178 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -46,6 +46,7 @@ struct pwm_mediatek_of_data {
* @clk_main: the clock used by PWM core
* @clk_pwms: the clock used by each PWM channel
 * @clk_freq: the fixed clock frequency of legacy MIPS SoCs
+ * @soc: pointer to chip's platform data
*/
struct pwm_mediatek_chip {
struct pwm_chip chip;
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 0d31833db2e2..358db4ff9d4f 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -14,7 +14,7 @@
* with a timer counter that goes up. When it overflows it gets
* reloaded with the load value and the pwm output goes up.
* When counter matches with match register, the output goes down.
- * Reference Manual: http://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
+ * Reference Manual: https://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
*
* Limitations:
* - When PWM is stopped, timer counter gets stopped immediately. This
@@ -58,7 +58,7 @@
* @mutex: Mutex to protect pwm apply state
* @dm_timer: Pointer to omap dm timer.
* @pdata: Pointer to omap dm timer ops.
- * dm_timer_pdev: Pointer to omap dm timer platform device
+ * @dm_timer_pdev: Pointer to omap dm timer platform device
*/
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index cc63f9baa481..62de0bb85921 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -181,7 +181,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* consecutively
*/
num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH);
- frac = DIV_ROUND_CLOSEST_ULL(num, state->period);
+ frac = DIV64_U64_ROUND_CLOSEST(num, state->period);
/* The hardware cannot generate a 100% duty cycle */
frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
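With pwm_state.period and .duty_cycle now 64-bit, open-coded divisions by the period move to the 64-by-64 helpers, since a plain u64/u64 division can pull in libgcc routines on 32-bit kernels. A rough userspace equivalent of DIV64_U64_ROUND_CLOSEST(), shown only to illustrate the rounding behaviour:

#include <inttypes.h>
#include <stdio.h>

static uint64_t div64_u64_round_closest(uint64_t a, uint64_t b)
{
	return (a + b / 2) / b;		/* round to nearest, like the macro */
}

int main(void)
{
	uint64_t period_ns = 1000000000ull;	/* 1 s; periods above ~4.29 s need u64 */
	uint64_t duty_ns = 333333333ull;
	uint64_t scale = 1ull << 16;		/* e.g. a 16-bit compare register */

	printf("compare value: %" PRIu64 "\n",
	       div64_u64_round_closest(duty_ns * scale, period_ns));
	return 0;
}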
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 67fca62524dc..134c14621ee0 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -61,7 +61,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(div, NSEC_PER_SEC);
if (!div) {
/* Clock is too slow to achieve requested period. */
- dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
+ dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period);
return -EINVAL;
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 18fbbe3277d0..961c59c99bb3 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -285,7 +285,7 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
- usecs_to_jiffies(cstate.period / 1000 + 1);
+ nsecs_to_jiffies(cstate.period + 1000);
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index ab38c8203b79..683804c7d26c 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -2,7 +2,7 @@
/*
* ECAP PWM driver
*
- * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/
*/
#include <linux/module.h>
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 7b4c770ce9d6..0846917ff2d2 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -2,7 +2,7 @@
/*
* EHRPWM PWM driver
*
- * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/
*/
#include <linux/module.h>
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 2389b8669846..449dbc0f49ed 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%u\n", state.period);
+ return sprintf(buf, "%llu\n", state.period);
}
static ssize_t period_store(struct device *child,
@@ -52,10 +52,10 @@ static ssize_t period_store(struct device *child,
struct pwm_export *export = child_to_pwm_export(child);
struct pwm_device *pwm = export->pwm;
struct pwm_state state;
- unsigned int val;
+ u64 val;
int ret;
- ret = kstrtouint(buf, 0, &val);
+ ret = kstrtou64(buf, 0, &val);
if (ret)
return ret;
@@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%u\n", state.duty_cycle);
+ return sprintf(buf, "%llu\n", state.duty_cycle);
}
static ssize_t duty_cycle_store(struct device *child,
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 451608e960a1..a30342942e26 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -981,7 +981,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
if (unlikely(copy_from_user(transfer,
(void __user *)(uintptr_t)transaction.block,
- transaction.count * sizeof(*transfer)))) {
+ array_size(sizeof(*transfer), transaction.count)))) {
ret = -EFAULT;
goto out_free;
}
@@ -994,7 +994,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
transfer,
- transaction.count * sizeof(*transfer))))
+ array_size(sizeof(*transfer), transaction.count))))
ret = -EFAULT;
out_free:
@@ -1710,8 +1710,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (rval & RIO_PEF_SWITCH) {
rio_mport_read_config_32(mport, destid, hopcount,
RIO_SWP_INFO_CAR, &swpinfo);
- size += (RIO_GET_TOTAL_PORTS(swpinfo) *
- sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+ size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
}
rdev = kzalloc(size, GFP_KERNEL);
@@ -2151,7 +2150,7 @@ static void mport_release_mapping(struct kref *ref)
switch (map->dir) {
case MAP_INBOUND:
rio_unmap_inb_region(mport, map->phys_addr);
- /* fall through */
+ fallthrough;
case MAP_DMA:
dma_free_coherent(mport->dev.parent, map->size,
map->virt_addr, map->phys_addr);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index eb8ed28533f8..19b0c33f4a62 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -330,7 +330,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
size_t size;
u32 swpinfo = 0;
- size = sizeof(struct rio_dev);
+ size = sizeof(*rdev);
if (rio_mport_read_config_32(port, destid, hopcount,
RIO_PEF_CAR, &result))
return NULL;
@@ -338,10 +338,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
rio_mport_read_config_32(port, destid, hopcount,
RIO_SWP_INFO_CAR, &swpinfo);
- if (result & RIO_PEF_SWITCH) {
- size += (RIO_GET_TOTAL_PORTS(swpinfo) *
- sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
- }
+ if (result & RIO_PEF_SWITCH)
+ size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
}
rdev = kzalloc(size, GFP_KERNEL);
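struct_size() and array_size() replace the open-coded multiplications above so that a large port or transfer count cannot overflow the computed allocation size. A simplified stand-in showing the saturating behaviour; the names here are illustrative, not the kernel macros themselves:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct switch_like {
	int nports;
	void *nextdev[];		/* flexible array member */
};

/* Saturate to SIZE_MAX on overflow so the later allocation fails cleanly. */
static size_t struct_size_like(size_t base, size_t elem, size_t n)
{
	if (n && elem > (SIZE_MAX - base) / n)
		return SIZE_MAX;
	return base + n * elem;
}

int main(void)
{
	size_t sz = struct_size_like(sizeof(struct switch_like),
				     sizeof(void *), 16);
	struct switch_like *sw = malloc(sz);

	if (sw)
		memset(sw, 0, sz);
	free(sw);
	return 0;
}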
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index fbc95cadaf53..1bacb37e8a99 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -399,7 +399,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
if (rate_count > 0)
break;
- /* fall through */
+ fallthrough;
default:
/* Not supported for this regulator */
return -ENOTSUPP;
@@ -1022,7 +1022,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
- /* Fall through - to the check below.*/
+ fallthrough; /* to the check below */
case AXP806_ID:
/*
* AXP806 also have DCDC work frequency setting register at a
@@ -1030,7 +1030,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
*/
if (axp20x->variant == AXP806_ID)
reg = AXP806_DCDC_FREQ_CTRL;
- /* Fall through */
+ fallthrough;
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
@@ -1118,7 +1118,7 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP806_DCDC_MODE_CTRL2;
- /* Fall through - to the check below. */
+ fallthrough; /* to the check below */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 75ff7c563c5d..3fd359914690 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1895,7 +1895,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
case EXCLUSIVE_GET:
dev_warn(dev,
"dummy supplies not allowed for exclusive requests\n");
- /* fall through */
+ fallthrough;
default:
return ERR_PTR(-ENODEV);
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 44e4cecbf6de..87b020d0b958 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -319,7 +319,7 @@ static int slg51000_regulator_init(struct slg51000 *chip)
rdesc->linear_min_sel = 0;
break;
}
- /* Fall through - to the check below.*/
+ fallthrough; /* to the check below */
default:
rdesc->linear_min_sel = vsel_range[0];
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index f7db250a7583..430265c404d6 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -312,7 +312,7 @@ static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
switch (info->flags) {
case SMPS_OFFSET_EN:
voltage = 100000;
- /* fall through */
+ fallthrough;
case 0:
switch (index) {
case 0:
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index c4d1731295eb..c6659dfea7c7 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -14,6 +14,15 @@ config REMOTEPROC
if REMOTEPROC
+config REMOTEPROC_CDEV
+ bool "Remoteproc character device interface"
+ help
+ Say y here to have a character device interface for the remoteproc
+ framework. Userspace can boot/shutdown remote processors through
+ this interface.
+
+ It's safe to say N if you don't want to use this interface.
+
config IMX_REMOTEPROC
tristate "IMX6/7 remoteproc support"
depends on ARCH_MXC
@@ -116,6 +125,9 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
+config QCOM_PIL_INFO
+ tristate
+
config QCOM_RPROC_COMMON
tristate
@@ -132,6 +144,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
@@ -148,8 +161,8 @@ config QCOM_Q6V5_MSS
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
- select QCOM_Q6V5_IPA_NOTIFY
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -164,6 +177,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
@@ -182,6 +196,7 @@ config QCOM_Q6V5_WCSS
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -189,9 +204,6 @@ config QCOM_Q6V5_WCSS
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based WCSS remote processors.
-config QCOM_Q6V5_IPA_NOTIFY
- tristate
-
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
@@ -215,6 +227,7 @@ config QCOM_WCNSS_PIL
depends on QCOM_SMEM
depends on QCOM_SYSMON || QCOM_SYSMON=n
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -249,6 +262,19 @@ config STM32_RPROC
This can be either built-in or a loadable module.
+config TI_K3_DSP_REMOTEPROC
+ tristate "TI K3 DSP remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's C66x and C71x DSP remote processor
+ subsystems on the TI K3 family of SoCs through the remote
+ processor framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ the DSP slave processors.
+
endif # REMOTEPROC
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index e8b886e511f0..3dfa28e6c701 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -5,10 +5,12 @@
obj-$(CONFIG_REMOTEPROC) += remoteproc.o
remoteproc-y := remoteproc_core.o
+remoteproc-y += remoteproc_coredump.o
remoteproc-y += remoteproc_debugfs.o
remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
@@ -16,13 +18,13 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
-obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
@@ -30,3 +32,4 @@ qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 189020d77b25..1c2b21a5d178 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
@@ -62,6 +61,28 @@ struct vpu {
struct device *dev;
};
+static int ingenic_rproc_prepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ int ret;
+
+ /* The clocks must be enabled for the firmware to be loaded in TCSM */
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret)
+ dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
+
+ return ret;
+}
+
+static int ingenic_rproc_unprepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+ return 0;
+}
+
static int ingenic_rproc_start(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
@@ -115,6 +136,8 @@ static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
}
static struct rproc_ops ingenic_rproc_ops = {
+ .prepare = ingenic_rproc_prepare,
+ .unprepare = ingenic_rproc_unprepare,
.start = ingenic_rproc_start,
.stop = ingenic_rproc_stop,
.kick = ingenic_rproc_kick,
@@ -135,16 +158,6 @@ static irqreturn_t vpu_interrupt(int irq, void *data)
return rproc_vq_interrupt(rproc, vring);
}
-static void ingenic_rproc_disable_clks(void *data)
-{
- struct vpu *vpu = data;
-
- pm_runtime_resume(vpu->dev);
- pm_runtime_disable(vpu->dev);
-
- clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
static int ingenic_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -206,35 +219,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
disable_irq(vpu->irq);
- /* The clocks must be enabled for the firmware to be loaded in TCSM */
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
- if (ret) {
- dev_err(dev, "Unable to start clocks\n");
- return ret;
- }
-
- pm_runtime_irq_safe(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- pm_runtime_use_autosuspend(dev);
-
- ret = devm_add_action_or_reset(dev, ingenic_rproc_disable_clks, vpu);
- if (ret) {
- dev_err(dev, "Unable to register action\n");
- goto out_pm_put;
- }
-
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "Failed to register remote processor\n");
- goto out_pm_put;
+ return ret;
}
-out_pm_put:
- pm_runtime_put_autosuspend(dev);
-
- return ret;
+ return 0;
}
static const struct of_device_id ingenic_rproc_of_matches[] = {
@@ -243,33 +234,10 @@ static const struct of_device_id ingenic_rproc_of_matches[] = {
};
MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
-static int __maybe_unused ingenic_rproc_suspend(struct device *dev)
-{
- struct vpu *vpu = dev_get_drvdata(dev);
-
- clk_bulk_disable(ARRAY_SIZE(vpu->clks), vpu->clks);
-
- return 0;
-}
-
-static int __maybe_unused ingenic_rproc_resume(struct device *dev)
-{
- struct vpu *vpu = dev_get_drvdata(dev);
-
- return clk_bulk_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
-static const struct dev_pm_ops __maybe_unused ingenic_rproc_pm = {
- SET_RUNTIME_PM_OPS(ingenic_rproc_suspend, ingenic_rproc_resume, NULL)
-};
-
static struct platform_driver ingenic_rproc_driver = {
.probe = ingenic_rproc_probe,
.driver = {
.name = "ingenic-vpu",
-#ifdef CONFIG_PM
- .pm = &ingenic_rproc_pm,
-#endif
.of_match_table = ingenic_rproc_of_matches,
},
};
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 6955fab0a78b..d94b7391bf9d 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -511,7 +511,6 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
dev_info(dev, "received echo reply from %s\n", name);
break;
case RP_MBOX_SUSPEND_ACK:
- /* Fall through */
case RP_MBOX_SUSPEND_CANCEL:
oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
complete(&oproc->pm_comp);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 9028cea2d81e..085fd73fa23a 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
+#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "remoteproc_internal.h"
@@ -23,7 +25,14 @@
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
-static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
+struct qcom_ssr_subsystem {
+ const char *name;
+ struct srcu_notifier_head notifier_list;
+ struct list_head list;
+};
+
+static LIST_HEAD(qcom_ssr_subsystem_list);
+static DEFINE_MUTEX(qcom_ssr_subsys_lock);
static int glink_subdev_start(struct rproc_subdev *subdev)
{
@@ -189,37 +198,122 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
}
EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
+static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
+{
+ struct qcom_ssr_subsystem *info;
+
+ mutex_lock(&qcom_ssr_subsys_lock);
+ /* Match in the global qcom_ssr_subsystem_list with name */
+ list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
+ if (!strcmp(info->name, name))
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ info = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ info->name = kstrdup_const(name, GFP_KERNEL);
+ srcu_init_notifier_head(&info->notifier_list);
+
+ /* Add to global notification list */
+ list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+ mutex_unlock(&qcom_ssr_subsys_lock);
+ return info;
+}
+
/**
* qcom_register_ssr_notifier() - register SSR notification handler
- * @nb: notifier_block to notify for restart notifications
+ * @name: Subsystem's SSR name
+ * @nb: notifier_block to be invoked upon subsystem's state change
*
- * Returns 0 on success, negative errno on failure.
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
*
- * This register the @notify function as handler for restart notifications. As
- * remote processors are stopped this function will be called, with the SSR
- * name passed as a parameter.
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
*/
-int qcom_register_ssr_notifier(struct notifier_block *nb)
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&ssr_notifiers, nb);
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(name);
+ if (IS_ERR(info))
+ return info;
+
+ srcu_notifier_chain_register(&info->notifier_list, nb);
+
+ return &info->notifier_list;
}
EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
/**
* qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify: subsystem cookie returned from qcom_register_ssr_notifier
* @nb: notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, %ENOENT otherwise.
*/
-void qcom_unregister_ssr_notifier(struct notifier_block *nb)
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&ssr_notifiers, nb);
+ return srcu_notifier_chain_unregister(notify, nb);
}
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_POWERUP, &data);
+ return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_POWERUP, &data);
+ return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = crashed,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
static void ssr_notify_unprepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
- blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name);
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_SHUTDOWN, &data);
}
/**
@@ -229,12 +323,24 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev)
* @ssr_name: identifier to use for notifications originating from @rproc
*
* As the @ssr is registered with the @rproc SSR events will be sent to all
- * registered listeners in the system as the remoteproc is shut down.
+ * registered listeners for the remoteproc when its SSR events occur
+ * (pre/post startup and pre/post shutdown).
*/
void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name)
{
- ssr->name = ssr_name;
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(ssr_name);
+ if (IS_ERR(info)) {
+ dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+ return;
+ }
+
+ ssr->info = info;
+ ssr->subdev.prepare = ssr_notify_prepare;
+ ssr->subdev.start = ssr_notify_start;
+ ssr->subdev.stop = ssr_notify_stop;
ssr->subdev.unprepare = ssr_notify_unprepare;
rproc_add_subdev(rproc, &ssr->subdev);
@@ -249,6 +355,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
{
rproc_remove_subdev(rproc, &ssr->subdev);
+ ssr->info = NULL;
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
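The reworked SSR API above keys each notifier chain by subsystem name and returns an opaque cookie that must be handed back at unregistration, while callbacks now receive a struct qcom_ssr_notify_data for each of the four events. A minimal client sketch follows; the callback name, the cookie variable and the use of the "mpss" subsystem are illustrative assumptions, not part of this patch.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>

static void *ssr_cookie;	/* opaque handle returned at registration */

static int example_ssr_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	/*
	 * event is one of QCOM_SSR_BEFORE_POWERUP, QCOM_SSR_AFTER_POWERUP,
	 * QCOM_SSR_BEFORE_SHUTDOWN or QCOM_SSR_AFTER_SHUTDOWN.
	 */
	pr_info("SSR: %s, event %lu, crashed %d\n",
		notify_data->name, event, notify_data->crashed);

	return NOTIFY_OK;
}

static struct notifier_block example_ssr_nb = {
	.notifier_call = example_ssr_cb,
};

static int example_register(void)
{
	/* Register against the "mpss" chain and keep the returned cookie. */
	ssr_cookie = qcom_register_ssr_notifier("mpss", &example_ssr_nb);
	return PTR_ERR_OR_ZERO(ssr_cookie);
}

static void example_unregister(void)
{
	/* The cookie from registration is required to unregister. */
	qcom_unregister_ssr_notifier(ssr_cookie, &example_ssr_nb);
}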
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 34e5188187dc..dfc641c3a98b 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -26,10 +26,11 @@ struct qcom_rproc_subdev {
struct qcom_smd_edge *edge;
};
+struct qcom_ssr_subsystem;
+
struct qcom_rproc_ssr {
struct rproc_subdev subdev;
-
- const char *name;
+ struct qcom_ssr_subsystem *info;
};
void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
new file mode 100644
index 000000000000..5521c4437ffa
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include "qcom_pil_info.h"
+
+/*
+ * The PIL relocation information region is used to communicate memory regions
+ * occupied by co-processor firmware for post mortem crash analysis.
+ *
+ * It consists of an array of entries with an 8 byte textual identifier of the
+ * region followed by a 64 bit base address and 32 bit size, both little
+ * endian.
+ */
+#define PIL_RELOC_NAME_LEN 8
+#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
+
+struct pil_reloc {
+ void __iomem *base;
+ size_t num_entries;
+};
+
+static struct pil_reloc _reloc __read_mostly;
+static DEFINE_MUTEX(pil_reloc_lock);
+
+static int qcom_pil_info_init(void)
+{
+ struct device_node *np;
+ struct resource imem;
+ void __iomem *base;
+ int ret;
+
+ /* Already initialized? */
+ if (_reloc.base)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &imem);
+ of_node_put(np);
+ if (ret < 0)
+ return ret;
+
+ base = ioremap(imem.start, resource_size(&imem));
+ if (!base) {
+ pr_err("failed to map PIL relocation info region\n");
+ return -ENOMEM;
+ }
+
+ memset_io(base, 0, resource_size(&imem));
+
+ _reloc.base = base;
+ _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+
+ return 0;
+}
+
+/**
+ * qcom_pil_info_store() - store PIL information of image in IMEM
+ * @image: name of the image
+ * @base: base address of the loaded image
+ * @size: size of the loaded image
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+{
+ char buf[PIL_RELOC_NAME_LEN];
+ void __iomem *entry;
+ int ret;
+ int i;
+
+ mutex_lock(&pil_reloc_lock);
+ ret = qcom_pil_info_init();
+ if (ret < 0) {
+ mutex_unlock(&pil_reloc_lock);
+ return ret;
+ }
+
+ for (i = 0; i < _reloc.num_entries; i++) {
+ entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
+
+ memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
+
+ /*
+ * An empty record means we didn't find it, given that the
+ * records are packed.
+ */
+ if (!buf[0])
+ goto found_unused;
+
+ if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
+ goto found_existing;
+ }
+
+ pr_warn("insufficient PIL info slots\n");
+ mutex_unlock(&pil_reloc_lock);
+ return -ENOMEM;
+
+found_unused:
+ memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+found_existing:
+ /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+ writel(base, entry + PIL_RELOC_NAME_LEN);
+ writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
+ writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
+ mutex_unlock(&pil_reloc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_pil_info_store);
+
+static void __exit pil_reloc_exit(void)
+{
+ mutex_lock(&pil_reloc_lock);
+ iounmap(_reloc.base);
+ _reloc.base = NULL;
+ mutex_unlock(&pil_reloc_lock);
+}
+module_exit(pil_reloc_exit);
+
+MODULE_DESCRIPTION("Qualcomm PIL relocation info");
+MODULE_LICENSE("GPL v2");
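For clarity, the 20-byte IMEM entry format described in the comment at the top of this file can be pictured as the struct below. This is illustrative only (the struct name is made up); the driver itself deliberately avoids such a packed view and writes the base address with two 32-bit writel() calls because odd-numbered entries are only 4-byte aligned.

/* Hypothetical view of one PIL relocation entry (PIL_RELOC_ENTRY_SIZE bytes). */
struct pil_reloc_entry {
	char   name[PIL_RELOC_NAME_LEN];	/* 8-byte textual image identifier   */
	__le64 base;				/* physical base of the loaded image */
	__le32 size;				/* size of the loaded image          */
} __packed;

With that layout in mind, the qcom_pil_info_store() calls added in the ADSP, modem, WCSS and WCNSS drivers below each claim (or reuse) one slot keyed by the image name.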
diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h
new file mode 100644
index 000000000000..0dce6142935e
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PIL_INFO_H__
+#define __QCOM_PIL_INFO_H__
+
+#include <linux/types.h>
+
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 111a442c993c..fd6fd36268d9 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -153,6 +153,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
{
int ret;
+ q6v5->running = false;
+
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index d2a2574dcf35..efb2c1aa80a3 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -26,6 +26,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
@@ -82,6 +83,7 @@ struct qcom_adsp {
unsigned int halt_lpass;
int crash_reason_smem;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -164,10 +166,17 @@ reset:
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ adsp->mem_region, adsp->mem_phys,
+ adsp->mem_size, &adsp->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
- return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ return 0;
}
static int adsp_start(struct rproc *rproc)
@@ -436,6 +445,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
ret = adsp_alloc_memory_region(adsp);
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
deleted file mode 100644
index e1c10a128bfd..000000000000
--- a/drivers/remoteproc/qcom_q6v5_ipa_notify.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * Qualcomm IPA notification subdev support
- *
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/remoteproc.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
-
-static void
-ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
-{
- struct qcom_rproc_ipa_notify *ipa_notify;
- qcom_ipa_notify_t notify;
-
- ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
- notify = ipa_notify->notify;
- if (notify)
- notify(ipa_notify->data, event);
-}
-
-static int ipa_notify_prepare(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_STARTING);
-
- return 0;
-}
-
-static int ipa_notify_start(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_RUNNING);
-
- return 0;
-}
-
-static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
-
-{
- ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
-}
-
-static void ipa_notify_unprepare(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_OFFLINE);
-}
-
-static void ipa_notify_removing(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_REMOVING);
-}
-
-/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
-void qcom_add_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify)
-{
- ipa_notify->notify = NULL;
- ipa_notify->data = NULL;
- ipa_notify->subdev.prepare = ipa_notify_prepare;
- ipa_notify->subdev.start = ipa_notify_start;
- ipa_notify->subdev.stop = ipa_notify_stop;
- ipa_notify->subdev.unprepare = ipa_notify_unprepare;
-
- rproc_add_subdev(rproc, &ipa_notify->subdev);
-}
-EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
-
-/* Remove the IPA notification subdevice */
-void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify)
-{
- struct rproc_subdev *subdev = &ipa_notify->subdev;
-
- ipa_notify_removing(subdev);
-
- rproc_remove_subdev(rproc, subdev);
- ipa_notify->notify = NULL; /* Make it obvious */
-}
-EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 903b2bb97e12..c401bcc263fa 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -22,7 +23,6 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
-#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
@@ -30,12 +30,15 @@
#include "remoteproc_internal.h"
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include <linux/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM 421
+#define MBA_LOG_SIZE SZ_4K
+
/* RMB Status Register Values */
#define RMB_PBL_SUCCESS 0x1
@@ -112,8 +115,6 @@
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
-#define QDSP6SS_BOOT_STATUS 0x408
-#define BOOT_STATUS_TIMEOUT_US 200
#define BOOT_FSM_TIMEOUT 10000
struct reg_info {
@@ -140,6 +141,7 @@ struct rproc_hexagon_res {
int version;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
bool has_spare_reg;
};
@@ -179,15 +181,14 @@ struct q6v5 {
int active_reg_count;
int proxy_reg_count;
- bool running;
-
bool dump_mba_loaded;
- unsigned long dump_segment_mask;
- unsigned long dump_complete_mask;
+ size_t current_dump_size;
+ size_t total_dump_size;
phys_addr_t mba_phys;
void *mba_region;
size_t mba_size;
+ size_t dp_size;
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
@@ -196,10 +197,10 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
- struct qcom_rproc_ipa_notify ipa_notify_subdev;
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
bool has_spare_reg;
int mpss_perm;
int mba_perm;
@@ -404,11 +405,33 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
current_perm, next, perms);
}
+static void q6v5_debug_policy_load(struct q6v5 *qproc)
+{
+ const struct firmware *dp_fw;
+
+ if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
+ return;
+
+ if (SZ_1M + dp_fw->size <= qproc->mba_size) {
+ memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+ qproc->dp_size = dp_fw->size;
+ }
+
+ release_firmware(dp_fw);
+}
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
+ /* MBA is restricted to a maximum size of 1M */
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+ dev_err(qproc->dev, "MBA firmware load failed\n");
+ return -EINVAL;
+ }
+
memcpy(qproc->mba_region, fw->data, fw->size);
+ q6v5_debug_policy_load(qproc);
return 0;
}
@@ -511,6 +534,26 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
return val;
}
+static void q6v5_dump_mba_logs(struct q6v5 *qproc)
+{
+ struct rproc *rproc = qproc->rproc;
+ void *data;
+
+ if (!qproc->has_mba_logs)
+ return;
+
+ if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
+ qproc->mba_size))
+ return;
+
+ data = vmalloc(MBA_LOG_SIZE);
+ if (!data)
+ return;
+
+ memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
+ dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+}
+
static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
@@ -579,13 +622,15 @@ static int q6v5proc_reset(struct q6v5 *qproc)
/* De-assert the Q6 stop core signal */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+ /* Wait for 10 us for any staggering logic to settle */
+ usleep_range(10, 20);
+
/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
- /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
- ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
- val, (val & BIT(0)) != 0, 1,
- BOOT_STATUS_TIMEOUT_US);
+ /* Poll the MSS_STATUS for FSM completion */
+ ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n");
/* Reset the modem so that boot FSM is in reset state */
@@ -829,6 +874,7 @@ static int q6v5_mba_load(struct q6v5 *qproc)
{
int ret;
int xfermemop_ret;
+ bool mba_load_err = false;
qcom_q6v5_prepare(&qproc->q6v5);
@@ -895,6 +941,10 @@ static int q6v5_mba_load(struct q6v5 *qproc)
}
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+ if (qproc->dp_size) {
+ writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ }
ret = q6v5proc_reset(qproc);
if (ret)
@@ -918,7 +968,7 @@ halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
+ mba_load_err = true;
reclaim_mba:
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
false, qproc->mba_phys,
@@ -926,6 +976,8 @@ reclaim_mba:
if (xfermemop_ret) {
dev_err(qproc->dev,
"Failed to reclaim mba buffer, system may become unstable\n");
+ } else if (mba_load_err) {
+ q6v5_dump_mba_logs(qproc);
}
disable_active_clks:
@@ -961,6 +1013,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
u32 val;
qproc->dump_mba_loaded = false;
+ qproc->dp_size = 0;
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
@@ -1139,15 +1192,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
} else if (phdr->p_filesz) {
/* Replace "xxx.xxx" with "xxx.bxx" */
sprintf(fw_name + fw_name_len - 3, "b%02d", i);
- ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+ ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
iounmap(ptr);
goto release_firmware;
}
- memcpy(ptr, seg_fw->data, seg_fw->size);
-
release_firmware(seg_fw);
}
@@ -1190,6 +1242,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
else if (ret < 0)
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
+ qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
+
release_firmware:
release_firmware(fw);
out:
@@ -1200,11 +1254,10 @@ out:
static void qcom_q6v5_dump_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
- void *dest)
+ void *dest, size_t cp_offset, size_t size)
{
int ret = 0;
struct q6v5 *qproc = rproc->priv;
- unsigned long mask = BIT((unsigned long)segment->priv);
int offset = segment->da - qproc->mpss_reloc;
void *ptr = NULL;
@@ -1221,19 +1274,19 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
}
if (!ret)
- ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
+ ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size);
if (ptr) {
- memcpy(dest, ptr, segment->size);
+ memcpy(dest, ptr, size);
iounmap(ptr);
} else {
- memset(dest, 0xff, segment->size);
+ memset(dest, 0xff, size);
}
- qproc->dump_segment_mask |= mask;
+ qproc->current_dump_size += size;
/* Reclaim mba after copying segments */
- if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
+ if (qproc->current_dump_size == qproc->total_dump_size) {
if (qproc->dump_mba_loaded) {
/* Try to reset ownership back to Q6 */
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
@@ -1255,7 +1308,8 @@ static int q6v5_start(struct rproc *rproc)
if (ret)
return ret;
- dev_info(qproc->dev, "MBA booted, loading mpss\n");
+ dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
+ qproc->dp_size ? "" : "out");
ret = q6v5_mpss_load(qproc);
if (ret)
@@ -1275,13 +1329,13 @@ static int q6v5_start(struct rproc *rproc)
"Failed to reclaim mba buffer system may become unstable\n");
/* Reset Dump Segment Mask */
- qproc->dump_segment_mask = 0;
- qproc->running = true;
+ qproc->current_dump_size = 0;
return 0;
reclaim_mpss:
q6v5_mba_reclaim(qproc);
+ q6v5_dump_mba_logs(qproc);
return ret;
}
@@ -1291,8 +1345,6 @@ static int q6v5_stop(struct rproc *rproc)
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- qproc->running = false;
-
ret = qcom_q6v5_request_stop(&qproc->q6v5);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
@@ -1324,7 +1376,7 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
- qproc->dump_complete_mask = 0;
+ qproc->total_dump_size = 0;
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -1335,11 +1387,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
phdr->p_memsz,
qcom_q6v5_dump_segment,
- (void *)i);
+ NULL);
if (ret)
break;
- qproc->dump_complete_mask |= BIT(i);
+ qproc->total_dump_size += phdr->p_memsz;
}
release_firmware(fw);
@@ -1554,39 +1606,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return 0;
}
-#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
-
-/* Register IPA notification function */
-int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
- void *data)
-{
- struct qcom_rproc_ipa_notify *ipa_notify;
- struct q6v5 *qproc = rproc->priv;
-
- if (!notify)
- return -EINVAL;
-
- ipa_notify = &qproc->ipa_notify_subdev;
- if (ipa_notify->notify)
- return -EBUSY;
-
- ipa_notify->notify = notify;
- ipa_notify->data = data;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
-
-/* Deregister IPA notification function */
-void qcom_deregister_ipa_notify(struct rproc *rproc)
-{
- struct q6v5 *qproc = rproc->priv;
-
- qproc->ipa_notify_subdev.notify = NULL;
-}
-EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
-#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
@@ -1701,6 +1720,7 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
+ qproc->has_mba_logs = desc->has_mba_logs;
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
qcom_msa_handover);
@@ -1712,7 +1732,6 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
- qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
@@ -1728,7 +1747,6 @@ static int q6v5_probe(struct platform_device *pdev)
remove_sysmon_subdev:
qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
- qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1750,7 +1768,6 @@ static int q6v5_remove(struct platform_device *pdev)
rproc_del(rproc);
qcom_remove_sysmon_subdev(qproc->sysmon);
- qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1792,6 +1809,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = true,
.has_spare_reg = true,
.version = MSS_SC7180,
};
@@ -1827,6 +1845,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_SDM845,
};
@@ -1854,6 +1873,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8998,
};
@@ -1884,6 +1904,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8996,
};
@@ -1917,6 +1938,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8916,
};
@@ -1958,6 +1980,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8974,
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 61791a03f648..3837f23995e0 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -25,6 +25,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
@@ -64,6 +65,7 @@ struct qcom_adsp {
int pas_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -117,11 +119,17 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
- return qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
+ if (ret)
+ return ret;
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
}
static int adsp_start(struct rproc *rproc)
@@ -405,6 +413,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp->rproc = rproc;
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
device_wakeup_enable(adsp->dev);
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 88c76b9417fa..8846ef0b0f1a 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -14,6 +14,7 @@
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#define WCSS_CRASH_REASON 421
@@ -424,10 +425,17 @@ static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+ 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_size, &wcss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
- return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
- 0, wcss->mem_region, wcss->mem_phys,
- wcss->mem_size, &wcss->mem_reloc);
+ return ret;
}
static const struct rproc_ops q6v5_wcss_ops = {
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 8d8996d714f0..9eb2f6bccea6 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -71,7 +71,7 @@ static LIST_HEAD(sysmon_list);
/**
* sysmon_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
static void sysmon_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
@@ -343,7 +343,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
/**
* ssctl_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
static void ssctl_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 5d65e1a9329a..e2573f79a137 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -27,6 +27,7 @@
#include "qcom_common.h"
#include "remoteproc_internal.h"
+#include "qcom_pil_info.h"
#include "qcom_wcnss.h"
#define WCNSS_CRASH_REASON_SMEM 422
@@ -145,10 +146,17 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
+ wcnss->mem_region, wcnss->mem_phys,
+ wcnss->mem_size, &wcnss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
- return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
- wcnss->mem_region, wcnss->mem_phys,
- wcnss->mem_size, &wcnss->mem_reloc);
+ return 0;
}
static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
new file mode 100644
index 000000000000..b19ea3057bde
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Character device interface driver for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/remoteproc_cdev.h>
+
+#include "remoteproc_internal.h"
+
+#define NUM_RPROC_DEVICES 64
+static dev_t rproc_major;
+
+static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+ char cmd[10];
+
+ if (!len || len > sizeof(cmd))
+ return -EINVAL;
+
+ ret = copy_from_user(cmd, buf, len);
+ if (ret)
+ return -EFAULT;
+
+ if (!strncmp(cmd, "start", len)) {
+ if (rproc->state == RPROC_RUNNING)
+ return -EBUSY;
+
+ ret = rproc_boot(rproc);
+ } else if (!strncmp(cmd, "stop", len)) {
+ if (rproc->state != RPROC_RUNNING)
+ return -EINVAL;
+
+ rproc_shutdown(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognized option\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ void __user *argp = (void __user *)arg;
+ s32 param;
+
+ switch (ioctl) {
+ case RPROC_SET_SHUTDOWN_ON_RELEASE:
+ if (copy_from_user(&param, argp, sizeof(s32)))
+ return -EFAULT;
+
+ rproc->cdev_put_on_release = !!param;
+ break;
+ case RPROC_GET_SHUTDOWN_ON_RELEASE:
+ param = (s32)rproc->cdev_put_on_release;
+ if (copy_to_user(argp, &param, sizeof(s32)))
+ return -EFAULT;
+
+ break;
+ default:
+ dev_err(&rproc->dev, "Unsupported ioctl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+
+ if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+ rproc_shutdown(rproc);
+
+ return 0;
+}
+
+static const struct file_operations rproc_fops = {
+ .write = rproc_cdev_write,
+ .unlocked_ioctl = rproc_device_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+ int ret;
+
+ cdev_init(&rproc->cdev, &rproc_fops);
+ rproc->cdev.owner = THIS_MODULE;
+
+ rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+ cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+ ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+ if (ret < 0)
+ dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+ return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+ __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc");
+}
+
+void __init rproc_init_cdev(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+ if (ret < 0)
+ pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
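The write() handler above recognizes the literal commands "start" and "stop", and the two ioctls control whether the remote processor is shut down when the file descriptor is released. A hedged userspace sketch of that interaction; the /dev/remoteproc0 node name is an assumption (it depends on how udev names the device).

/* Userspace sketch: boot a remote processor through the new cdev interface. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/remoteproc_cdev.h>	/* RPROC_SET_SHUTDOWN_ON_RELEASE */

int main(void)
{
	__s32 shutdown_on_release = 1;
	int fd = open("/dev/remoteproc0", O_WRONLY);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Have the core shut the processor down when this fd is released. */
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &shutdown_on_release) < 0)
		perror("RPROC_SET_SHUTDOWN_ON_RELEASE");

	/* The write handler accepts the literal commands "start" and "stop". */
	if (write(fd, "start", strlen("start")) < 0)
		perror("start");

	close(fd);	/* triggers rproc_shutdown() because of the ioctl above */
	return 0;
}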
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9f04c30c4aaf..7f90eeea67e2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -26,10 +26,8 @@
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
-#include <linux/devcoredump.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
-#include <linux/pm_runtime.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
@@ -41,7 +39,6 @@
#include <linux/platform_device.h>
#include "remoteproc_internal.h"
-#include "remoteproc_elf_helpers.h"
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
@@ -244,6 +241,7 @@ EXPORT_SYMBOL(rproc_da_to_va);
*
* Return: a valid pointer on carveout entry on success or NULL on failure.
*/
+__printf(2, 3)
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
@@ -411,10 +409,22 @@ void rproc_free_vring(struct rproc_vring *rvring)
idr_remove(&rproc->notifyids, rvring->notifyid);
- /* reset resource entry info */
- rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
- rsc->vring[idx].da = 0;
- rsc->vring[idx].notifyid = -1;
+ /*
+ * At this point rproc_stop() has been called and the installed resource
+ * table in the remote processor memory may no longer be accessible. As
+ * such and as per rproc_stop(), rproc->table_ptr points to the cached
+ * resource table (rproc->cached_table). The cached resource table is
+ * only available when a remote processor has been booted by the
+ * remoteproc core, otherwise it is NULL.
+ *
+ * Based on the above, reset the virtio device section in the cached
+ * resource table only if there is one to work with.
+ */
+ if (rproc->table_ptr) {
+ rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+ rsc->vring[idx].da = 0;
+ rsc->vring[idx].notifyid = -1;
+ }
}
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
@@ -967,6 +977,7 @@ EXPORT_SYMBOL(rproc_add_carveout);
* This function allocates a rproc_mem_entry struct and fill it with parameters
* provided by client.
*/
+__printf(8, 9)
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
void *va, dma_addr_t dma, size_t len, u32 da,
@@ -1010,6 +1021,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
* This function allocates a rproc_mem_entry struct and fill it with parameters
* provided by client.
*/
+__printf(5, 6)
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
@@ -1034,6 +1046,29 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
+/**
+ * rproc_of_parse_firmware() - parse and return the firmware-name
+ * @dev: pointer on device struct representing a rproc
+ * @index: index to use for the firmware-name retrieval
+ * @fw_name: pointer to a character string; on success it is set to the
+ * firmware name and is left unmodified otherwise.
+ *
+ * This is an OF helper function that parses a device's DT node for
+ * the "firmware-name" property and returns the firmware name pointer
+ * in @fw_name on success.
+ *
+ * Return: 0 on success, or an appropriate error code on failure.
+ */
+int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
+{
+ int ret;
+
+ ret = of_property_read_string_index(dev->of_node, "firmware-name",
+ index, fw_name);
+ return ret ? ret : 0;
+}
+EXPORT_SYMBOL(rproc_of_parse_firmware);
+
/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
@@ -1239,19 +1274,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
return 0;
}
-/**
- * rproc_coredump_cleanup() - clean up dump_segments list
- * @rproc: the remote processor handle
- */
-static void rproc_coredump_cleanup(struct rproc *rproc)
-{
- struct rproc_dump_segment *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
- list_del(&entry->node);
- kfree(entry);
- }
-}
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
@@ -1260,7 +1282,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
* This function will free all resources acquired for @rproc, and it
* is called whenever @rproc either shuts down or fails to boot.
*/
-static void rproc_resource_cleanup(struct rproc *rproc)
+void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
struct rproc_debug_trace *trace, *ttmp;
@@ -1304,6 +1326,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
rproc_coredump_cleanup(rproc);
}
+EXPORT_SYMBOL(rproc_resource_cleanup);
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
@@ -1370,6 +1393,48 @@ reset_table_ptr:
return ret;
}
+static int rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto out;
+ }
+
+ /* Attach to the remote processor */
+ ret = rproc_attach_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't attach to rproc %s: %d\n",
+ rproc->name, ret);
+ goto unprepare_subdevices;
+ }
+
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now attached\n", rproc->name);
+
+ return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+out:
+ return ret;
+}
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -1383,12 +1448,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
if (ret)
return ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
- return ret;
- }
-
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -1398,7 +1457,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ret = rproc_enable_iommu(rproc);
if (ret) {
dev_err(dev, "can't enable iommu: %d\n", ret);
- goto put_pm_runtime;
+ return ret;
}
/* Prepare rproc for firmware loading if needed */
@@ -1452,8 +1511,63 @@ unprepare_rproc:
rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
-put_pm_runtime:
- pm_runtime_put(dev);
+ return ret;
+}
+
+/*
+ * Attach to remote processor - similar to rproc_fw_boot() but without
+ * the steps that deal with the firmware image.
+ */
+static int rproc_actuate(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
+ /*
+ * Handle firmware resources required to attach to a remote processor.
+ * Because we are attaching rather than booting the remote processor,
+ * we expect the platform driver to properly set rproc->table_ptr.
+ */
+ ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto disable_iommu;
+ }
+
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
+ ret = rproc_attach(rproc);
+ if (ret)
+ goto clean_up_resources;
+
+ return 0;
+
+clean_up_resources:
+ rproc_resource_cleanup(rproc);
+disable_iommu:
+ rproc_disable_iommu(rproc);
return ret;
}
@@ -1479,6 +1593,15 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
int ret;
/*
+ * Since the remote processor is in a detached state, it has already
+ * been booted by another entity. As such there is no point in waiting
+ * for a firmware image to be loaded, we can simply initiate the process
+ * of attaching to it immediately.
+ */
+ if (rproc->state == RPROC_DETACHED)
+ return rproc_boot(rproc);
+
+ /*
* We're initiating an asynchronous firmware loading, so we can
* be built-in kernel code, without hanging the boot process.
*/
@@ -1513,187 +1636,19 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
rproc->state = RPROC_OFFLINE;
- dev_info(dev, "stopped remote processor %s\n", rproc->name);
-
- return 0;
-}
-
-/**
- * rproc_coredump_add_segment() - add segment of device memory to coredump
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- *
- * Add device memory to the list of segments to be included in a coredump for
- * the remoteproc.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
-{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
-
- list_add_tail(&segment->node, &rproc->dump_segments);
-
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_add_segment);
-
-/**
- * rproc_coredump_add_custom_segment() - add custom coredump segment
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- * @dumpfn: custom dump function called for each segment during coredump
- * @priv: private data
- *
- * Add device memory to the list of segments to be included in the coredump
- * and associate the segment with the given custom dump function and private
- * data.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_custom_segment(struct rproc *rproc,
- dma_addr_t da, size_t size,
- void (*dumpfn)(struct rproc *rproc,
- struct rproc_dump_segment *segment,
- void *dest),
- void *priv)
-{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
- segment->priv = priv;
- segment->dump = dumpfn;
+ /*
+ * The remote processor has been stopped and is now offline, which means
+ * that the next time it is brought back online the remoteproc core will
+ * be responsible for loading its firmware. As such it is no longer
+ * autonomous.
+ */
+ rproc->autonomous = false;
- list_add_tail(&segment->node, &rproc->dump_segments);
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
-EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
-/**
- * rproc_coredump_set_elf_info() - set coredump elf information
- * @rproc: handle of a remote processor
- * @class: elf class for coredump elf file
- * @machine: elf machine for coredump elf file
- *
- * Set elf information which will be used for coredump elf file.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
-{
- if (class != ELFCLASS64 && class != ELFCLASS32)
- return -EINVAL;
-
- rproc->elf_class = class;
- rproc->elf_machine = machine;
-
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_set_elf_info);
-
-/**
- * rproc_coredump() - perform coredump
- * @rproc: rproc handle
- *
- * This function will generate an ELF header for the registered segments
- * and create a devcoredump device associated with rproc.
- */
-static void rproc_coredump(struct rproc *rproc)
-{
- struct rproc_dump_segment *segment;
- void *phdr;
- void *ehdr;
- size_t data_size;
- size_t offset;
- void *data;
- void *ptr;
- u8 class = rproc->elf_class;
- int phnum = 0;
-
- if (list_empty(&rproc->dump_segments))
- return;
-
- if (class == ELFCLASSNONE) {
- dev_err(&rproc->dev, "Elf class is not set\n");
- return;
- }
-
- data_size = elf_size_of_hdr(class);
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- data_size += elf_size_of_phdr(class) + segment->size;
-
- phnum++;
- }
-
- data = vmalloc(data_size);
- if (!data)
- return;
-
- ehdr = data;
-
- memset(ehdr, 0, elf_size_of_hdr(class));
- /* e_ident field is common for both elf32 and elf64 */
- elf_hdr_init_ident(ehdr, class);
-
- elf_hdr_set_e_type(class, ehdr, ET_CORE);
- elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
- elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
- elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
- elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
- elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
- elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
- elf_hdr_set_e_phnum(class, ehdr, phnum);
-
- phdr = data + elf_hdr_get_e_phoff(class, ehdr);
- offset = elf_hdr_get_e_phoff(class, ehdr);
- offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
-
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- memset(phdr, 0, elf_size_of_phdr(class));
- elf_phdr_set_p_type(class, phdr, PT_LOAD);
- elf_phdr_set_p_offset(class, phdr, offset);
- elf_phdr_set_p_vaddr(class, phdr, segment->da);
- elf_phdr_set_p_paddr(class, phdr, segment->da);
- elf_phdr_set_p_filesz(class, phdr, segment->size);
- elf_phdr_set_p_memsz(class, phdr, segment->size);
- elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
- elf_phdr_set_p_align(class, phdr, 0);
-
- if (segment->dump) {
- segment->dump(rproc, segment, data + offset);
- } else {
- ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- if (!ptr) {
- dev_err(&rproc->dev,
- "invalid coredump segment (%pad, %zu)\n",
- &segment->da, segment->size);
- memset(data + offset, 0xff, segment->size);
- } else {
- memcpy(data + offset, ptr, segment->size);
- }
- }
-
- offset += elf_phdr_get_p_filesz(class, phdr);
- phdr += elf_size_of_phdr(class);
- }
-
- dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
-}
/**
* rproc_trigger_recovery() - recover a remoteproc
@@ -1815,24 +1770,30 @@ int rproc_boot(struct rproc *rproc)
goto unlock_mutex;
}
- /* skip the boot process if rproc is already powered up */
+ /* skip the boot or attach process if rproc is already powered up */
if (atomic_inc_return(&rproc->power) > 1) {
ret = 0;
goto unlock_mutex;
}
- dev_info(dev, "powering up %s\n", rproc->name);
+ if (rproc->state == RPROC_DETACHED) {
+ dev_info(dev, "attaching to %s\n", rproc->name);
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
- goto downref_rproc;
- }
+ ret = rproc_actuate(rproc);
+ } else {
+ dev_info(dev, "powering up %s\n", rproc->name);
- ret = rproc_fw_boot(rproc, firmware_p);
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto downref_rproc;
+ }
- release_firmware(firmware_p);
+ ret = rproc_fw_boot(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+ }
downref_rproc:
if (ret)
@@ -1891,8 +1852,6 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
- pm_runtime_put(dev);
-
/* Free the copy of the resource table */
kfree(rproc->cached_table);
rproc->cached_table = NULL;
@@ -1952,6 +1911,43 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
+static int rproc_validate(struct rproc *rproc)
+{
+ switch (rproc->state) {
+ case RPROC_OFFLINE:
+ /*
+ * An offline processor without a start()
+ * function makes no sense.
+ */
+ if (!rproc->ops->start)
+ return -EINVAL;
+ break;
+ case RPROC_DETACHED:
+ /*
+ * A remote processor in a detached state without an
+	 * attach() function makes no sense.
+ */
+ if (!rproc->ops->attach)
+ return -EINVAL;
+ /*
+ * When attaching to a remote processor the device memory
+ * is already available and as such there is no need to have a
+ * cached table.
+ */
+ if (rproc->cached_table)
+ return -EINVAL;
+ break;
+ default:
+ /*
+ * When adding a remote processor, the state of the device
+ * can be offline or detached, nothing else.
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
@@ -1981,11 +1977,30 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
+ ret = rproc_validate(rproc);
+ if (ret < 0)
+ return ret;
+
dev_info(dev, "%s is available\n", rproc->name);
/* create debugfs entries */
rproc_create_debug_dir(rproc);
+ /* add char device for this remoteproc */
+ ret = rproc_char_device_add(rproc);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Remind ourselves the remote processor has been attached to rather
+ * than booted by the remoteproc core. This is important because the
+ * RPROC_DETACHED state will be lost as soon as the remote processor
+ * has been attached to. Used in firmware_show() and reset in
+ * rproc_stop().
+ */
+ if (rproc->state == RPROC_DETACHED)
+ rproc->autonomous = true;
+
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
@@ -2183,9 +2198,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->state = RPROC_OFFLINE;
- pm_runtime_no_callbacks(&rproc->dev);
- pm_runtime_enable(&rproc->dev);
-
return rproc;
put_device:
@@ -2205,7 +2217,6 @@ EXPORT_SYMBOL(rproc_alloc);
*/
void rproc_free(struct rproc *rproc)
{
- pm_runtime_disable(&rproc->dev);
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
@@ -2256,6 +2267,7 @@ int rproc_del(struct rproc *rproc)
mutex_unlock(&rproc->lock);
rproc_delete_debug_dir(rproc);
+ rproc_char_device_remove(rproc);
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
@@ -2424,6 +2436,7 @@ static int __init remoteproc_init(void)
{
rproc_init_sysfs();
rproc_init_debugfs();
+ rproc_init_cdev();
rproc_init_panic();
return 0;
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
new file mode 100644
index 000000000000..bb15a29038e8
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Coredump functionality for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+struct rproc_coredump_state {
+ struct rproc *rproc;
+ void *header;
+ struct completion dump_done;
+};
+
+/**
+ * rproc_coredump_cleanup() - clean up dump_segments list
+ * @rproc: the remote processor handle
+ */
+void rproc_coredump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * rproc_coredump_add_segment() - add segment of device memory to coredump
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ *
+ * Add device memory to the list of segments to be included in a coredump for
+ * the remoteproc.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_segment);
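For illustration only (not part of this patch): a platform driver would typically register the regions it wants captured before the processor is (re)booted, for instance from its prepare() or parse_fw() handler. The "foo" name, device address and size below are made-up values, and the usual <linux/remoteproc.h> and <linux/sizes.h> includes are assumed.

static int foo_rproc_prepare(struct rproc *rproc)
{
	/* capture a hypothetical 64 KB TCM region in any subsequent coredump */
	return rproc_coredump_add_segment(rproc, 0x20000000, SZ_64K);
}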
+
+/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ * @dumpfn: custom dump function called for each segment during coredump
+ * @priv: private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset,
+ size_t size),
+ void *priv)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+ segment->priv = priv;
+ segment->dump = dumpfn;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
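As a hedged sketch of how this custom hook can be used (hypothetical "foo" names, not part of this patch): the callback receives the destination buffer plus an offset/size window into the segment, so it can stream data that is not directly mapped through rproc_da_to_va(), such as an MMIO register window. Assumes <linux/io.h> and <linux/remoteproc.h>.

struct foo_rproc {
	void __iomem *regs;	/* hypothetical MMIO window of the remote core */
};

static void foo_rproc_dump_regs(struct rproc *rproc,
				struct rproc_dump_segment *segment,
				void *dest, size_t offset, size_t size)
{
	struct foo_rproc *foo = segment->priv;

	/* copy only the requested slice of the register window */
	memcpy_fromio(dest, foo->regs + offset, size);
}

/* registered once, e.g. from probe() (FOO_REGS_DA/FOO_REGS_SIZE are illustrative):
 * rproc_coredump_add_custom_segment(rproc, FOO_REGS_DA, FOO_REGS_SIZE,
 *				     foo_rproc_dump_regs, foo);
 */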
+
+/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc: handle of a remote processor
+ * @class: elf class for coredump elf file
+ * @machine: elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+ if (class != ELFCLASS64 && class != ELFCLASS32)
+ return -EINVAL;
+
+ rproc->elf_class = class;
+ rproc->elf_machine = machine;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
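Usage note, as an illustrative example: a driver is expected to call this once before the first dump is generated, typically from probe(); the stm32 probe shown further down in this diff calls it with ELFCLASS32 and EM_NONE. A minimal sketch (the "foo" helper and the EM_ARM machine type are assumptions):

static int foo_rproc_set_dump_info(struct rproc *rproc)
{
	/* request a 32-bit core file; EM_ARM is only an illustrative machine type */
	return rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);
}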
+
+static void rproc_coredump_free(void *data)
+{
+ struct rproc_coredump_state *dump_state = data;
+
+ vfree(dump_state->header);
+ complete(&dump_state->dump_done);
+}
+
+static void *rproc_coredump_find_segment(loff_t user_offset,
+ struct list_head *segments,
+ size_t *data_left)
+{
+ struct rproc_dump_segment *segment;
+
+ list_for_each_entry(segment, segments, node) {
+ if (user_offset < segment->size) {
+ *data_left = segment->size - user_offset;
+ return segment;
+ }
+ user_offset -= segment->size;
+ }
+
+ *data_left = 0;
+ return NULL;
+}
+
+static void rproc_copy_segment(struct rproc *rproc, void *dest,
+ struct rproc_dump_segment *segment,
+ size_t offset, size_t size)
+{
+ void *ptr;
+
+ if (segment->dump) {
+ segment->dump(rproc, segment, dest, offset, size);
+ } else {
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size);
+ if (!ptr) {
+ dev_err(&rproc->dev,
+				"invalid copy request for segment %pad with offset %zu and size %zu\n",
+ &segment->da, offset, size);
+ memset(dest, 0xff, size);
+ } else {
+ memcpy(dest, ptr, size);
+ }
+ }
+}
+
+static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t header_sz)
+{
+ size_t seg_data, bytes_left = count;
+ ssize_t copy_sz;
+ struct rproc_dump_segment *seg;
+ struct rproc_coredump_state *dump_state = data;
+ struct rproc *rproc = dump_state->rproc;
+ void *elfcore = dump_state->header;
+
+ /* Copy the vmalloc'ed header first. */
+ if (offset < header_sz) {
+ copy_sz = memory_read_from_buffer(buffer, count, &offset,
+ elfcore, header_sz);
+
+ return copy_sz;
+ }
+
+ /*
+ * Find out the segment memory chunk to be copied based on offset.
+ * Keep copying data until count bytes are read.
+ */
+ while (bytes_left) {
+ seg = rproc_coredump_find_segment(offset - header_sz,
+ &rproc->dump_segments,
+ &seg_data);
+ /* EOF check */
+ if (!seg) {
+ dev_info(&rproc->dev, "Ramdump done, %lld bytes read",
+ offset);
+ break;
+ }
+
+ copy_sz = min_t(size_t, bytes_left, seg_data);
+
+ rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
+ copy_sz);
+
+ offset += copy_sz;
+ buffer += copy_sz;
+ bytes_left -= copy_sz;
+ }
+
+ return count - bytes_left;
+}
+
+/**
+ * rproc_coredump() - perform coredump
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered segments
+ * and create a devcoredump device associated with rproc. Based on the
+ * coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it will stall
+ * the recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *phdr;
+ void *ehdr;
+ size_t data_size;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int phnum = 0;
+ struct rproc_coredump_state dump_state;
+ enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ data_size = elf_size_of_hdr(class);
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ /*
+		 * For the default configuration the buffer includes both headers
+		 * and segments. For an inline dump the buffer only includes the
+		 * headers, as segments are read directly from device memory.
+ */
+ data_size += elf_size_of_phdr(class);
+ if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ data_size += segment->size;
+
+ phnum++;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
+
+ if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
+ }
+ if (dump_conf == RPROC_COREDUMP_DEFAULT) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+ /*
+ * Wait until the dump is read and free is called. Data is freed
+ * by devcoredump framework automatically after 5 minutes.
+ */
+ wait_for_completion(&dump_state.dump_done);
+}
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 732770e92b99..2e3b3e22e1d0 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -28,6 +28,94 @@
static struct dentry *rproc_dbg;
/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via debugfs. Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DEFAULT] = "default",
+ [RPROC_COREDUMP_INLINE] = "inline",
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[20];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%s\n",
+ rproc_coredump_str[rproc->dump_conf]);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "default".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * default: This is the default coredump mechanism. When the remoteproc
+ * crashes the entire coredump will be copied to a separate buffer
+ * and exposed to userspace.
+ *
+ * inline: The coredump will not be copied to a separate buffer and the
+ * recovery process will have to wait until data is read by
+ *		userspace. But this avoids the use of extra memory.
+ *
+ * disabled: This will disable coredump. Recovery will proceed without
+ * collecting any dump.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ int ret, err = 0;
+ char buf[20];
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret)
+ return -EFAULT;
+
+ /* remove end of line */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!strncmp(buf, "disable", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "inline", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else if (!strncmp(buf, "default", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DEFAULT;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ err = -EINVAL;
+ }
+out:
+ return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+ .read = rproc_coredump_read,
+ .write = rproc_coredump_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+/*
* Some remote processors may support dumping trace logs into a shared
* memory buffer. We expose this trace buffer using debugfs, so users
* can easily tell what's going on remotely.
@@ -337,6 +425,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_rsc_table_fops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
rproc, &rproc_carveouts_fops);
+ debugfs_create_file("coredump", 0600, rproc->dbg_dir,
+ rproc, &rproc_coredump_fops);
}
void __init rproc_init_debugfs(void)
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 4ba7cb59d3e8..c34002888d2c 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -28,6 +28,8 @@ struct rproc_debug_trace {
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
void rproc_vdev_release(struct kref *ref);
+int rproc_of_parse_firmware(struct device *dev, int index,
+ const char **fw_name);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
@@ -47,6 +49,38 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+
+#ifdef CONFIG_REMOTEPROC_CDEV
+void rproc_init_cdev(void);
+void rproc_exit_cdev(void);
+int rproc_char_device_add(struct rproc *rproc);
+void rproc_char_device_remove(struct rproc *rproc);
+#else
+static inline void rproc_init_cdev(void)
+{
+}
+
+static inline void rproc_exit_cdev(void)
+{
+}
+
+/*
+ * The character device interface is an optional feature; if it is not enabled,
+ * the function should not return an error.
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+ return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
@@ -79,6 +113,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc)
return 0;
}
+static inline int rproc_attach_device(struct rproc *rproc)
+{
+ if (rproc->ops->attach)
+ return rproc->ops->attach(rproc);
+
+ return 0;
+}
+
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 52b871327b55..eea514cec50e 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rproc *rproc = to_rproc(dev);
-
- return sprintf(buf, "%s\n", rproc->firmware);
+ const char *firmware = rproc->firmware;
+
+ /*
+	 * If the remote processor has been started by an external
+	 * entity we have no idea what image it is running. As such,
+	 * simply display a generic string rather than rproc->firmware.
+	 *
+	 * Here we rely on the autonomous flag because a remote processor
+	 * may have been attached to and currently be in a running state.
+ */
+ if (rproc->autonomous)
+ firmware = "unknown";
+
+ return sprintf(buf, "%s\n", firmware);
}
/* Change firmware name via sysfs */
@@ -72,6 +84,7 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 062797a447c6..f4da42fc0eeb 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -39,6 +39,15 @@
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
+#define RSC_TBL_SIZE 1024
+
+#define M4_STATE_OFF 0
+#define M4_STATE_INI 1
+#define M4_STATE_CRUN 2
+#define M4_STATE_CSTOP 3
+#define M4_STATE_STANDBY 4
+#define M4_STATE_CRASH 5
+
struct stm32_syscon {
struct regmap *map;
u32 reg;
@@ -71,12 +80,15 @@ struct stm32_rproc {
struct reset_control *rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
+ struct stm32_syscon m4_state;
+ struct stm32_syscon rsctbl;
int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
struct workqueue_struct *workqueue;
bool secured_soc;
+ void __iomem *rsc_va;
};
static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
@@ -128,10 +140,10 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
return 0;
}
-static int stm32_rproc_of_memory_translations(struct rproc *rproc)
+static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
+ struct stm32_rproc *ddata)
{
- struct device *parent, *dev = rproc->dev.parent;
- struct stm32_rproc *ddata = rproc->priv;
+ struct device *parent, *dev = &pdev->dev;
struct device_node *np;
struct stm32_rproc_mem *p_mems;
struct stm32_rproc_mem_ranges *mem_range;
@@ -204,7 +216,7 @@ static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
return 0;
}
-static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
@@ -257,12 +269,23 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
index++;
}
+ return 0;
+}
+
+static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret = stm32_rproc_parse_memory_regions(rproc);
+
+ if (ret)
+ return ret;
+
return stm32_rproc_elf_load_rsc_table(rproc, fw);
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
{
- struct rproc *rproc = data;
+ struct platform_device *pdev = data;
+ struct rproc *rproc = platform_get_drvdata(pdev);
rproc_report_crash(rproc, RPROC_WATCHDOG);
@@ -437,6 +460,13 @@ static int stm32_rproc_start(struct rproc *rproc)
return stm32_rproc_set_hold_boot(rproc, true);
}
+static int stm32_rproc_attach(struct rproc *rproc)
+{
+ stm32_rproc_add_coredump_trace(rproc);
+
+ return stm32_rproc_set_hold_boot(rproc, true);
+}
+
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
@@ -474,6 +504,18 @@ static int stm32_rproc_stop(struct rproc *rproc)
}
}
+ /* update coprocessor state to OFF if available */
+ if (ddata->m4_state.map) {
+ err = regmap_update_bits(ddata->m4_state.map,
+ ddata->m4_state.reg,
+ ddata->m4_state.mask,
+ M4_STATE_OFF);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set copro state\n");
+ return err;
+ }
+ }
+
return 0;
}
@@ -502,6 +544,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
static struct rproc_ops st_rproc_ops = {
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
+ .attach = stm32_rproc_attach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
@@ -538,12 +581,11 @@ out:
return err;
}
-static int stm32_rproc_parse_dt(struct platform_device *pdev)
+static int stm32_rproc_parse_dt(struct platform_device *pdev,
+ struct stm32_rproc *ddata, bool *auto_boot)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct stm32_rproc *ddata = rproc->priv;
struct stm32_syscon tz;
unsigned int tzen;
int err, irq;
@@ -554,7 +596,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
- dev_name(dev), rproc);
+ dev_name(dev), pdev);
if (err) {
dev_err(dev, "failed to request wdg irq\n");
return err;
@@ -589,7 +631,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = regmap_read(tz.map, tz.reg, &tzen);
if (err) {
- dev_err(&rproc->dev, "failed to read tzen\n");
+ dev_err(dev, "failed to read tzen\n");
return err;
}
ddata->secured_soc = tzen & tz.mask;
@@ -605,9 +647,118 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
if (err)
dev_info(dev, "failed to get pdds\n");
- rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
+ *auto_boot = of_property_read_bool(np, "st,auto-boot");
- return stm32_rproc_of_memory_translations(rproc);
+ /*
+	 * See if we can check the M4 status, i.e. whether it was started
+	 * by the boot loader or not.
+ */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
+ &ddata->m4_state);
+ if (err) {
+ /* remember this */
+ ddata->m4_state.map = NULL;
+ /* no coprocessor state syscon (optional) */
+ dev_warn(dev, "m4 state not supported\n");
+
+ /* no need to go further */
+ return 0;
+ }
+
+ /* See if we can get the resource table */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
+ &ddata->rsctbl);
+ if (err) {
+ /* no rsc table syscon (optional) */
+ dev_warn(dev, "rsc tbl syscon not supported\n");
+ }
+
+ return 0;
+}
+
+static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+ unsigned int *state)
+{
+ /* See stm32_rproc_parse_dt() */
+ if (!ddata->m4_state.map) {
+ /*
+ * We couldn't get the coprocessor's state, assume
+ * it is not running.
+ */
+		*state = M4_STATE_OFF;
+ return 0;
+ }
+
+ return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
+}
+
+static int stm32_rproc_da_to_pa(struct platform_device *pdev,
+ struct stm32_rproc *ddata,
+ u64 da, phys_addr_t *pa)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+		dev_dbg(dev, "da %llx to pa %pa\n", da, pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
+static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
+ struct rproc *rproc,
+ struct stm32_rproc *ddata)
+{
+ struct device *dev = &pdev->dev;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return err;
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return 0;
+
+ err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
+ if (err)
+ return err;
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+		dev_err(dev, "Unable to map memory region: %pa+%x\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * The resource table is already loaded in device memory, no need
+ * to work with a cached table.
+ */
+ rproc->cached_table = NULL;
+ /* Assuming the resource table fits in 1kB is fair */
+ rproc->table_sz = RSC_TBL_SIZE;
+ rproc->table_ptr = (struct resource_table *)ddata->rsc_va;
+
+ return 0;
}
static int stm32_rproc_probe(struct platform_device *pdev)
@@ -616,6 +767,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
+ unsigned int state;
int ret;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -626,25 +778,47 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (!rproc)
return -ENOMEM;
+ ddata = rproc->priv;
+
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_of_memory_translations(pdev, ddata);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_get_m4_status(ddata, &state);
+ if (ret)
+ goto free_rproc;
+
+ if (state == M4_STATE_CRUN) {
+ rproc->state = RPROC_DETACHED;
+
+ ret = stm32_rproc_parse_memory_regions(rproc);
+ if (ret)
+ goto free_resources;
+
+ ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
+ if (ret)
+ goto free_resources;
+ }
+
rproc->has_iommu = false;
- ddata = rproc->priv;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
dev_err(dev, "cannot create workqueue\n");
ret = -ENOMEM;
- goto free_rproc;
+ goto free_resources;
}
platform_set_drvdata(pdev, rproc);
- ret = stm32_rproc_parse_dt(pdev);
- if (ret)
- goto free_wkq;
-
ret = stm32_rproc_request_mbox(rproc);
if (ret)
- goto free_rproc;
+ goto free_wkq;
ret = rproc_add(rproc);
if (ret)
@@ -656,6 +830,8 @@ free_mb:
stm32_rproc_free_mbox(rproc);
free_wkq:
destroy_workqueue(ddata->workqueue);
+free_resources:
+ rproc_resource_cleanup(rproc);
free_rproc:
if (device_may_wakeup(dev)) {
dev_pm_clear_wake_irq(dev);
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
new file mode 100644
index 000000000000..9011e477290c
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 DSP Remote Processor(s) driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_dsp_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct k3_dsp_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_dsp_mem_data - memory definitions for a DSP
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_dsp_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_dsp_dev_data - device data structure for a DSP
+ * @mems: pointer to memory definitions for a DSP
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_dsp_dev_data {
+ const struct k3_dsp_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_dsp_rproc - k3 DSP remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_dsp_mem *mem;
+ int num_mems;
+ struct k3_dsp_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_dsp_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is not used and is inconsequential, as the kick is done
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (kproc->data->uses_lreset)
+ return ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset)
+ goto lreset;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+lreset:
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ u32 boot_addr;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+	 * Ping the remote processor. This is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ boot_addr = rproc->bootaddr;
+ if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+ dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+ boot_addr, kproc->data->boot_align_addr);
+ ret = -EINVAL;
+ goto put_mbox;
+ }
+
+ dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ ret = k3_dsp_rproc_release(kproc);
+ if (ret)
+ goto put_mbox;
+
+ return 0;
+
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+
+ mbox_free_channel(kproc->mbox);
+
+ k3_dsp_rproc_reset(kproc);
+
+ return 0;
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both these
+ * addresses need to be looked through for translation. The translated addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if (da >= dev_addr &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if (da >= bus_addr &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
+static const struct rproc_ops k3_dsp_rproc_ops = {
+ .start = k3_dsp_rproc_start,
+ .stop = k3_dsp_rproc_stop,
+ .kick = k3_dsp_rproc_kick,
+ .da_to_va = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_dsp_rproc *kproc)
+{
+ const struct k3_dsp_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = kproc->data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+		if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+			return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(kproc->dev);
+ return ret;
+}
+
+static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static
+struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
+
+static int k3_dsp_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct k3_dsp_dev_data *data;
+ struct k3_dsp_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ int ret = 0;
+ int ret1;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ if (data->uses_lreset) {
+ rproc->ops->prepare = k3_dsp_rproc_prepare;
+ rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ }
+ kproc = rproc->priv;
+ kproc->rproc = rproc;
+ kproc->dev = dev;
+ kproc->data = data;
+
+ kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = PTR_ERR(kproc->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ kproc->ti_sci = NULL;
+ goto free_rproc;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto put_sci;
+ }
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset)) {
+ ret = PTR_ERR(kproc->reset);
+ dev_err(dev, "failed to get reset, status = %d\n", ret);
+ goto put_sci;
+ }
+
+ kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ ret = PTR_ERR(kproc->tsp);
+ goto put_sci;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto free_tsp;
+ }
+
+ ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ goto release_tsp;
+
+ ret = k3_dsp_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ goto release_tsp;
+ }
+
+ /*
+	 * Ensure the DSP local reset is asserted so the DSP doesn't execute
+	 * bogus code when the module reset is released in .prepare().
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+		dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, kproc);
+
+ return 0;
+
+release_mem:
+ k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+ ret1 = ti_sci_proc_release(kproc->tsp);
+ if (ret1)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+ kfree(kproc->tsp);
+put_sci:
+ ret1 = ti_sci_put_handle(kproc->ti_sci);
+ if (ret1)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rproc_del(kproc->rproc);
+
+ ret = ti_sci_proc_release(kproc->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ kfree(kproc->tsp);
+
+ ret = ti_sci_put_handle(kproc->ti_sci);
+ if (ret)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+ k3_dsp_reserved_mem_exit(kproc);
+ rproc_free(kproc->rproc);
+
+ return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores only have an L1P cache; there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+ .mems = c66_mems,
+ .num_mems = ARRAY_SIZE(c66_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+ .mems = c71_mems,
+ .num_mems = ARRAY_SIZE(c71_mems),
+ .boot_align_addr = SZ_2M,
+ .uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+ { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+ { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+ .probe = k3_dsp_rproc_probe,
+ .remove = k3_dsp_rproc_remove,
+ .driver = {
+ .name = "k3-dsp-rproc",
+ .of_match_table = k3_dsp_of_match,
+ },
+};
+
+module_platform_driver(k3_dsp_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
new file mode 100644
index 000000000000..778558abcdcc
--- /dev/null
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Texas Instruments TI-SCI Processor Controller Helper Functions
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_SCI_PROC_H
+#define REMOTEPROC_TI_SCI_PROC_H
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_proc - structure representing a processor control client
+ * @sci: cached TI-SCI protocol handle
+ * @ops: cached TI-SCI proc ops
+ * @dev: cached client device pointer
+ * @proc_id: processor id for the consumer remoteproc device
+ * @host_id: host id to pass the control over for this consumer remoteproc
+ * device
+ */
+struct ti_sci_proc {
+ const struct ti_sci_handle *sci;
+ const struct ti_sci_proc_ops *ops;
+ struct device *dev;
+ u8 proc_id;
+ u8 host_id;
+};
+
+static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->request(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor request failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_release(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->release(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor release failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n",
+ tsp->proc_id, tsp->host_id, ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+ u64 boot_vector,
+ u32 cfg_set, u32 cfg_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_set, cfg_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+ u32 ctrl_set, u32 ctrl_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+ u64 *boot_vector, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *status_flags)
+{
+ int ret;
+
+ ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_flags, ctrl_flags, status_flags);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+ ret);
+ return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index d170fe663210..e8aa8691deb2 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -222,7 +222,7 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
switch (id) {
case IMX8MQ_RESET_PCIEPHY:
- case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+ case IMX8MQ_RESET_PCIEPHY2:
/*
* wait for more than 10us to release phy g_rst and
* btnrst
@@ -232,12 +232,12 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
- case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
+ case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN:
+ case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 1995f5b3ea67..f40312b16da0 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -553,7 +553,7 @@ static void qcom_glink_receive_version(struct qcom_glink *glink,
break;
case GLINK_VERSION_1:
glink->features &= features;
- /* FALLTHROUGH */
+ fallthrough;
default:
qcom_glink_send_version_ack(glink);
break;
@@ -584,7 +584,7 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
break;
glink->features &= features;
- /* FALLTHROUGH */
+ fallthrough;
default:
qcom_glink_send_version(glink);
break;
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 07d4f3374098..9006fc7f73d0 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/wait.h>
@@ -84,11 +85,11 @@ struct virtproc_info {
* Every message sent(/received) on the rpmsg bus begins with this header.
*/
struct rpmsg_hdr {
- u32 src;
- u32 dst;
- u32 reserved;
- u16 len;
- u16 flags;
+ __virtio32 src;
+ __virtio32 dst;
+ __virtio32 reserved;
+ __virtio16 len;
+ __virtio16 flags;
u8 data[];
} __packed;
@@ -106,8 +107,8 @@ struct rpmsg_hdr {
*/
struct rpmsg_ns_msg {
char name[RPMSG_NAME_SIZE];
- u32 addr;
- u32 flags;
+ __virtio32 addr;
+ __virtio32 flags;
} __packed;
/**
@@ -335,8 +336,8 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->ept->addr;
- nsm.flags = RPMSG_NS_CREATE;
+ nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -359,8 +360,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->ept->addr;
- nsm.flags = RPMSG_NS_DESTROY;
+ nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -612,18 +613,18 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
}
}
- msg->len = len;
+ msg->len = cpu_to_virtio16(vrp->vdev, len);
msg->flags = 0;
- msg->src = src;
- msg->dst = dst;
+ msg->src = cpu_to_virtio32(vrp->vdev, src);
+ msg->dst = cpu_to_virtio32(vrp->vdev, dst);
msg->reserved = 0;
memcpy(msg->data, data, len);
dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
- msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+ src, dst, len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg, sizeof(*msg) + len, true);
#endif
rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);
@@ -704,13 +705,17 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
{
struct rpmsg_endpoint *ept;
struct scatterlist sg;
+ unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len);
int err;
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
- msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+ virtio32_to_cpu(vrp->vdev, msg->src),
+ virtio32_to_cpu(vrp->vdev, msg->dst), msg_len,
+ virtio16_to_cpu(vrp->vdev, msg->flags),
+ virtio32_to_cpu(vrp->vdev, msg->reserved));
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg, sizeof(*msg) + msg_len, true);
#endif
/*
@@ -718,15 +723,15 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
* the reported payload length.
*/
if (len > vrp->buf_size ||
- msg->len > (len - sizeof(struct rpmsg_hdr))) {
- dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
+ msg_len > (len - sizeof(struct rpmsg_hdr))) {
+ dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg_len);
return -EINVAL;
}
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
- ept = idr_find(&vrp->endpoints, msg->dst);
+ ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
/* let's make sure no one deallocates ept while we use it */
if (ept)
@@ -739,8 +744,8 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
mutex_lock(&ept->cb_lock);
if (ept->cb)
- ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
- msg->src);
+ ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
+ virtio32_to_cpu(vrp->vdev, msg->src));
mutex_unlock(&ept->cb_lock);
@@ -846,15 +851,15 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
/* don't trust the remote processor for null terminating the name */
msg->name[RPMSG_NAME_SIZE - 1] = '\0';
- dev_info(dev, "%sing channel %s addr 0x%x\n",
- msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
- msg->name, msg->addr);
-
strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = msg->addr;
+ chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
+
+ dev_info(dev, "%sing channel %s addr 0x%x\n",
+ virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
+ "destroy" : "creat", msg->name, chinfo.dst);
- if (msg->flags & RPMSG_NS_DESTROY) {
+ if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
if (ret)
dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
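The virtio_rpmsg_bus.c changes above give every on-wire header field an explicit virtio byte order (__virtio16/__virtio32) and route all accesses through the vdev-aware helpers, which use little-endian for VIRTIO 1.0 devices and guest-native order for legacy ones. A minimal sketch of the resulting access pattern, assuming it lives next to the struct rpmsg_hdr definition in this file; rpmsg_hdr_fill() is a hypothetical helper for illustration, not part of the patch:

#include <linux/virtio_config.h>	/* cpu_to_virtio16/32() helpers */

static void rpmsg_hdr_fill(struct virtio_device *vdev, struct rpmsg_hdr *hdr,
			   u32 src, u32 dst, u16 len)
{
	/* Every wire field is stored in virtio byte order, never raw. */
	hdr->src = cpu_to_virtio32(vdev, src);
	hdr->dst = cpu_to_virtio32(vdev, dst);
	hdr->len = cpu_to_virtio16(vdev, len);
	hdr->flags = 0;			/* zero is byte-order neutral */
	hdr->reserved = 0;
}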
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f3b8e6dcd879..48c536acd777 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -281,7 +281,8 @@ config RTC_DRV_DS1374
config RTC_DRV_DS1374_WDT
bool "Dallas/Maxim DS1374 watchdog timer"
- depends on RTC_DRV_DS1374
+ depends on RTC_DRV_DS1374 && WATCHDOG
+ select WATCHDOG_CORE
help
If you say Y here you will get support for the
watchdog timer in the Dallas Semiconductor DS1374
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 811fe2005488..2370ac0cdb5f 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -7,7 +7,7 @@
*
* Detailed datasheet of the chip is available here:
*
- * http://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf
+ * https://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf
*
* This work is based on ISL12057 driver (drivers/rtc/rtc-isl12057.c).
*
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 4a63f0cd2321..933e4237237d 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -6,7 +6,7 @@
* Copyright (C) 2014 Pavel Machek <pavel@denx.de>
*
* You can get hardware description at
- * http://www.ti.com/lit/ds/symlink/bq32000.pdf
+ * https://www.ti.com/lit/ds/symlink/bq32000.pdf
*/
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index a603f1f21125..800667d73a6f 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -261,7 +261,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->rtc_dev);
rtc->rtc_dev->ops = &cpcap_rtc_ops;
- rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1;
+ rtc->rtc_dev->range_max = (timeu64_t) (DAY_MASK + 1) * SECS_PER_DAY - 1;
err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor);
if (err)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 49702942bb08..54c85cdd019d 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1668,6 +1668,8 @@ static const struct watchdog_ops ds1388_wdt_ops = {
static void ds1307_wdt_register(struct ds1307 *ds1307)
{
struct watchdog_device *wdt;
+ int err;
+ int val;
if (ds1307->type != ds_1388)
return;
@@ -1676,6 +1678,10 @@ static void ds1307_wdt_register(struct ds1307 *ds1307)
if (!wdt)
return;
+ err = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &val);
+ if (!err && val & DS1388_BIT_WF)
+ wdt->bootstatus = WDIOF_CARDRESET;
+
wdt->info = &ds1388_wdt_info;
wdt->ops = &ds1388_wdt_ops;
wdt->timeout = 99;
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 9c51a12cf70f..177d870bda0d 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -46,6 +46,7 @@
#define DS1374_REG_WDALM2 0x06
#define DS1374_REG_CR 0x07 /* Control */
#define DS1374_REG_CR_AIE 0x01 /* Alarm Int. Enable */
+#define DS1374_REG_CR_WDSTR 0x08 /* 1=INT, 0=RST */
#define DS1374_REG_CR_WDALM 0x20 /* 1=Watchdog, 0=Alarm */
#define DS1374_REG_CR_WACE 0x40 /* WD/Alarm counter enable */
#define DS1374_REG_SR 0x08 /* Status */
@@ -71,7 +72,9 @@ struct ds1374 {
struct i2c_client *client;
struct rtc_device *rtc;
struct work_struct work;
-
+#ifdef CONFIG_RTC_DRV_DS1374_WDT
+ struct watchdog_device wdt;
+#endif
/* The mutex protects alarm operations, and prevents a race
* between the enable_irq() in the workqueue and the free_irq()
* in the remove function.
@@ -369,238 +372,96 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
*
*****************************************************************************
*/
-static struct i2c_client *save_client;
/* Default margin */
-#define WD_TIMO 131762
+#define TIMER_MARGIN_DEFAULT 32
+#define TIMER_MARGIN_MIN 1
+#define TIMER_MARGIN_MAX	4095	/* 24-bit counter at 4096 Hz, so ~4095 s max */
-#define DRV_NAME "DS1374 Watchdog"
-
-static int wdt_margin = WD_TIMO;
-static unsigned long wdt_is_open;
+static int wdt_margin;
module_param(wdt_margin, int, 0);
MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 32s)");
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
static const struct watchdog_info ds1374_wdt_info = {
- .identity = "DS1374 WTD",
+ .identity = "DS1374 Watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
-static int ds1374_wdt_settimeout(unsigned int timeout)
+static int ds1374_wdt_settimeout(struct watchdog_device *wdt, unsigned int timeout)
{
- int ret = -ENOIOCTLCMD;
- int cr;
+ struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
+ struct i2c_client *client = ds1374->client;
+ int ret, cr;
- ret = cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
- if (ret < 0)
- goto out;
+ wdt->timeout = timeout;
+
+ cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+ if (cr < 0)
+ return cr;
/* Disable any existing watchdog/alarm before setting the new one */
cr &= ~DS1374_REG_CR_WACE;
- ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+ ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
if (ret < 0)
- goto out;
+ return ret;
/* Set new watchdog time */
- ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3);
- if (ret) {
- pr_info("couldn't set new watchdog time\n");
- goto out;
- }
+ timeout = timeout * 4096;
+ ret = ds1374_write_rtc(client, timeout, DS1374_REG_WDALM0, 3);
+ if (ret)
+ return ret;
/* Enable watchdog timer */
cr |= DS1374_REG_CR_WACE | DS1374_REG_CR_WDALM;
+	cr &= ~DS1374_REG_CR_WDSTR;	/* WDSTR = 0: route the watchdog output to the RST pin */
cr &= ~DS1374_REG_CR_AIE;
- ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+ ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
if (ret < 0)
- goto out;
+ return ret;
return 0;
-out:
- return ret;
}
-
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
-static void ds1374_wdt_ping(void)
+static int ds1374_wdt_start(struct watchdog_device *wdt)
{
+ struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
u32 val;
- int ret = 0;
- ret = ds1374_read_rtc(save_client, &val, DS1374_REG_WDALM0, 3);
- if (ret)
- pr_info("WD TICK FAIL!!!!!!!!!! %i\n", ret);
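+	/*
+	 * Reading the WD/ALM counter registers reloads the countdown (the old
+	 * ds1374_wdt_ping() relied on the same behaviour), so this acts as a
+	 * ping; the counter itself is enabled in set_timeout().
+	 */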
+ return ds1374_read_rtc(ds1374->client, &val, DS1374_REG_WDALM0, 3);
}
-static void ds1374_wdt_disable(void)
+static int ds1374_wdt_stop(struct watchdog_device *wdt)
{
+ struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
+ struct i2c_client *client = ds1374->client;
int cr;
- cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
+ cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+ if (cr < 0)
+ return cr;
+
/* Disable watchdog timer */
cr &= ~DS1374_REG_CR_WACE;
- i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
-}
-
-/*
- * Watchdog device is opened, and watchdog starts running.
- */
-static int ds1374_wdt_open(struct inode *inode, struct file *file)
-{
- struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
-
- if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
- mutex_lock(&ds1374->mutex);
- if (test_and_set_bit(0, &wdt_is_open)) {
- mutex_unlock(&ds1374->mutex);
- return -EBUSY;
- }
- /*
- * Activate
- */
- wdt_is_open = 1;
- mutex_unlock(&ds1374->mutex);
- return stream_open(inode, file);
- }
- return -ENODEV;
-}
-
-/*
- * Close the watchdog device.
- */
-static int ds1374_wdt_release(struct inode *inode, struct file *file)
-{
- if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
- clear_bit(0, &wdt_is_open);
-
- return 0;
-}
-
-/*
- * Pat the watchdog whenever device is written to.
- */
-static ssize_t ds1374_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- ds1374_wdt_ping();
- return 1;
- }
- return 0;
-}
-
-static ssize_t ds1374_wdt_read(struct file *file, char __user *data,
- size_t len, loff_t *ppos)
-{
- return 0;
-}
-
-/*
- * Handle commands from user-space.
- */
-static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int new_margin, options;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info __user *)arg,
- &ds1374_wdt_info, sizeof(ds1374_wdt_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int __user *)arg);
- case WDIOC_KEEPALIVE:
- ds1374_wdt_ping();
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, (int __user *)arg))
- return -EFAULT;
-
- /* the hardware's tick rate is 4096 Hz, so
- * the counter value needs to be scaled accordingly
- */
- new_margin <<= 12;
- if (new_margin < 1 || new_margin > 16777216)
- return -EINVAL;
-
- wdt_margin = new_margin;
- ds1374_wdt_settimeout(new_margin);
- ds1374_wdt_ping();
- /* fallthrough */
- case WDIOC_GETTIMEOUT:
- /* when returning ... inverse is true */
- return put_user((wdt_margin >> 12), (int __user *)arg);
- case WDIOC_SETOPTIONS:
- if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- pr_info("disable watchdog\n");
- ds1374_wdt_disable();
- return 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- pr_info("enable watchdog\n");
- ds1374_wdt_settimeout(wdt_margin);
- ds1374_wdt_ping();
- return 0;
- }
- return -EINVAL;
- }
- return -ENOTTY;
+ return i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
}
-static long ds1374_wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
- struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
-
- mutex_lock(&ds1374->mutex);
- ret = ds1374_wdt_ioctl(file, cmd, arg);
- mutex_unlock(&ds1374->mutex);
-
- return ret;
-}
-
-static int ds1374_wdt_notify_sys(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- /* Disable Watchdog */
- ds1374_wdt_disable();
- return NOTIFY_DONE;
-}
-
-static const struct file_operations ds1374_wdt_fops = {
- .owner = THIS_MODULE,
- .read = ds1374_wdt_read,
- .unlocked_ioctl = ds1374_wdt_unlocked_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .write = ds1374_wdt_write,
- .open = ds1374_wdt_open,
- .release = ds1374_wdt_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice ds1374_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &ds1374_wdt_fops,
-};
-
-static struct notifier_block ds1374_wdt_notifier = {
- .notifier_call = ds1374_wdt_notify_sys,
+static const struct watchdog_ops ds1374_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ds1374_wdt_start,
+ .stop = ds1374_wdt_stop,
+ .set_timeout = ds1374_wdt_settimeout,
};
-
#endif /*CONFIG_RTC_DRV_DS1374_WDT*/
/*
*****************************************************************************
@@ -652,16 +513,22 @@ static int ds1374_probe(struct i2c_client *client,
return ret;
#ifdef CONFIG_RTC_DRV_DS1374_WDT
- save_client = client;
- ret = misc_register(&ds1374_miscdev);
+ ds1374->wdt.info = &ds1374_wdt_info;
+ ds1374->wdt.ops = &ds1374_wdt_ops;
+ ds1374->wdt.timeout = TIMER_MARGIN_DEFAULT;
+ ds1374->wdt.min_timeout = TIMER_MARGIN_MIN;
+ ds1374->wdt.max_timeout = TIMER_MARGIN_MAX;
+
+ watchdog_init_timeout(&ds1374->wdt, wdt_margin, &client->dev);
+ watchdog_set_nowayout(&ds1374->wdt, nowayout);
+ watchdog_stop_on_reboot(&ds1374->wdt);
+ watchdog_stop_on_unregister(&ds1374->wdt);
+ watchdog_set_drvdata(&ds1374->wdt, ds1374);
+ ds1374_wdt_settimeout(&ds1374->wdt, ds1374->wdt.timeout);
+
+ ret = devm_watchdog_register_device(&client->dev, &ds1374->wdt);
if (ret)
return ret;
- ret = register_reboot_notifier(&ds1374_wdt_notifier);
- if (ret) {
- misc_deregister(&ds1374_miscdev);
- return ret;
- }
- ds1374_wdt_settimeout(131072);
#endif
return 0;
@@ -670,11 +537,6 @@ static int ds1374_probe(struct i2c_client *client,
static int ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
-#ifdef CONFIG_RTC_DRV_DS1374_WDT
- misc_deregister(&ds1374_miscdev);
- ds1374_miscdev.parent = NULL;
- unregister_reboot_notifier(&ds1374_wdt_notifier);
-#endif
if (client->irq > 0) {
mutex_lock(&ds1374->mutex);
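The ds1374 changes above drop the private misc-device/ioctl watchdog in favour of the common watchdog core. Reduced to a minimal sketch with placeholder "foo" names (the hardware accesses are elided, so the stubs below only show the shape of the callbacks):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/watchdog.h>

struct foo {
	struct watchdog_device wdt;
	/* device specifics (i2c client, regmap, ...) would live here */
};

static int foo_wdt_start(struct watchdog_device *wdt)
{
	return 0;	/* hardware enable elided in this sketch */
}

static int foo_wdt_stop(struct watchdog_device *wdt)
{
	return 0;	/* hardware disable elided in this sketch */
}

static int foo_wdt_set_timeout(struct watchdog_device *wdt, unsigned int t)
{
	wdt->timeout = t;
	return 0;	/* programming the counter elided in this sketch */
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo watchdog",
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner		= THIS_MODULE,
	.start		= foo_wdt_start,
	.stop		= foo_wdt_stop,
	.set_timeout	= foo_wdt_set_timeout,
};

static int foo_register_wdt(struct device *dev, struct foo *foo)
{
	foo->wdt.info		= &foo_wdt_info;
	foo->wdt.ops		= &foo_wdt_ops;
	foo->wdt.timeout	= 32;	/* default, in seconds */
	foo->wdt.min_timeout	= 1;
	foo->wdt.max_timeout	= 4095;

	watchdog_init_timeout(&foo->wdt, 0, dev);	/* honour DT/module override */
	watchdog_set_nowayout(&foo->wdt, WATCHDOG_NOWAYOUT);
	watchdog_stop_on_reboot(&foo->wdt);	/* replaces the old reboot notifier */
	watchdog_set_drvdata(&foo->wdt, foo);

	return devm_watchdog_register_device(dev, &foo->wdt);
}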
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 27797157fcb3..6349d2cd3680 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
writel(rtc_alarm64, base + TIMER_ALARM_LOW);
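+		/* An armed alarm only fires if its interrupt is enabled too. */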
+ writel(1, base + TIMER_IRQ_ENABLED);
} else {
/*
* if this function was called with enabled=0
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index f21dc6b16d88..8d141d8a5490 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -95,7 +95,7 @@
/**
* struct imxdi_dev - private imxdi rtc data
- * @pdev: pionter to platform dev
+ * @pdev: pointer to platform dev
* @rtc: pointer to rtc struct
* @ioaddr: IO registers pointer
* @clk: input reference clock
@@ -350,7 +350,7 @@ static int di_handle_invalid_and_failure_state(struct imxdi_dev *imxdi, u32 dsr)
* the tamper register is locked. We cannot disable the
* tamper detection. The TDCHL can only be reset by a
* DRYICE POR, but we cannot force a DRYICE POR in
- * softwere because we are still in "FAILURE STATE".
+ * software because we are still in "FAILURE STATE".
* We need a DRYICE POR via battery power cycling....
*/
/*
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 9b70b371bd0c..8a89bc52b0d4 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -740,7 +740,7 @@ static int wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
wdt_margin = new_margin;
wdt_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(wdt_margin, (int __user *)arg);
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 03ebcf1c0f3d..d51cc12114cb 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -805,17 +805,36 @@ static int max77686_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int max77686_rtc_suspend(struct device *dev)
{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
if (device_may_wakeup(dev)) {
struct max77686_rtc_info *info = dev_get_drvdata(dev);
- return enable_irq_wake(info->virq);
+ ret = enable_irq_wake(info->virq);
}
- return 0;
+ /*
+	 * If the main (non-virtual) IRQ is used as the parent IRQ, it must be
+	 * disabled during suspend: if it fires while suspended, it would be
+	 * handled before the I2C bus has resumed.
+	 *
+	 * Since the main IRQ is shared, every user should disable it so it
+	 * cannot fire while any of them is still suspended.
+ */
+ if (!info->drv_data->rtc_irq_from_platform)
+ disable_irq(info->rtc_irq);
+
+ return ret;
}
static int max77686_rtc_resume(struct device *dev)
{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+
+ if (!info->drv_data->rtc_irq_from_platform)
+ enable_irq(info->rtc_irq);
+
if (device_may_wakeup(dev)) {
struct max77686_rtc_info *info = dev_get_drvdata(dev);
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 1660d5e79582..21cbf7f892e8 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -7,7 +7,7 @@
* based on other Linux RTC drivers
*
* Device datasheet:
- * http://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf
+ * https://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf
*/
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9c5670776c68..ed6316992cbb 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -28,8 +29,11 @@
#define PCF2127_BIT_CTRL1_TSF1 BIT(4)
/* Control register 2 */
#define PCF2127_REG_CTRL2 0x01
+#define PCF2127_BIT_CTRL2_AIE BIT(1)
#define PCF2127_BIT_CTRL2_TSIE BIT(2)
+#define PCF2127_BIT_CTRL2_AF BIT(4)
#define PCF2127_BIT_CTRL2_TSF2 BIT(5)
+#define PCF2127_BIT_CTRL2_WDTF BIT(6)
/* Control register 3 */
#define PCF2127_REG_CTRL3 0x02
#define PCF2127_BIT_CTRL3_BLIE BIT(0)
@@ -46,6 +50,13 @@
#define PCF2127_REG_DW 0x07
#define PCF2127_REG_MO 0x08
#define PCF2127_REG_YR 0x09
+/* Alarm registers */
+#define PCF2127_REG_ALARM_SC 0x0A
+#define PCF2127_REG_ALARM_MN 0x0B
+#define PCF2127_REG_ALARM_HR 0x0C
+#define PCF2127_REG_ALARM_DM 0x0D
+#define PCF2127_REG_ALARM_DW 0x0E
+#define PCF2127_BIT_ALARM_AE BIT(7)
/* Watchdog registers */
#define PCF2127_REG_WD_CTL 0x10
#define PCF2127_BIT_WD_CTL_TF0 BIT(0)
@@ -324,6 +335,112 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
.set_timeout = pcf2127_wdt_set_timeout,
};
+/* Alarm */
+static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+	u8 buf[5];
+	unsigned int ctrl2;
+ int ret;
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+ if (ret)
+ return ret;
+
+ ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
+ sizeof(buf));
+ if (ret)
+ return ret;
+
+ alrm->enabled = ctrl2 & PCF2127_BIT_CTRL2_AIE;
+ alrm->pending = ctrl2 & PCF2127_BIT_CTRL2_AF;
+
+ alrm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ alrm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+ alrm->time.tm_hour = bcd2bin(buf[2] & 0x3F);
+ alrm->time.tm_mday = bcd2bin(buf[3] & 0x3F);
+
+ return 0;
+}
+
+static int pcf2127_rtc_alarm_irq_enable(struct device *dev, u32 enable)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
+ PCF2127_BIT_CTRL2_AIE,
+ enable ? PCF2127_BIT_CTRL2_AIE : 0);
+ if (ret)
+ return ret;
+
+ return pcf2127_wdt_active_ping(&pcf2127->wdd);
+}
+
+static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ uint8_t buf[5];
+ int ret;
+
+ ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
+ PCF2127_BIT_CTRL2_AF, 0);
+ if (ret)
+ return ret;
+
+ ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+ if (ret)
+ return ret;
+
+ buf[0] = bin2bcd(alrm->time.tm_sec);
+ buf[1] = bin2bcd(alrm->time.tm_min);
+ buf[2] = bin2bcd(alrm->time.tm_hour);
+ buf[3] = bin2bcd(alrm->time.tm_mday);
+ buf[4] = PCF2127_BIT_ALARM_AE; /* Do not match on week day */
+
+ ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
+ sizeof(buf));
+ if (ret)
+ return ret;
+
+ return pcf2127_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ unsigned int ctrl2 = 0;
+ int ret = 0;
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!(ctrl2 & PCF2127_BIT_CTRL2_AF))
+ return IRQ_NONE;
+
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~(PCF2127_BIT_CTRL2_AF | PCF2127_BIT_CTRL2_WDTF));
+
+ rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
+
+ pcf2127_wdt_active_ping(&pcf2127->wdd);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops pcf2127_rtc_alrm_ops = {
+ .ioctl = pcf2127_rtc_ioctl,
+ .read_time = pcf2127_rtc_read_time,
+ .set_time = pcf2127_rtc_set_time,
+ .read_alarm = pcf2127_rtc_read_alarm,
+ .set_alarm = pcf2127_rtc_set_alarm,
+ .alarm_irq_enable = pcf2127_rtc_alarm_irq_enable,
+};
+
/* sysfs interface */
static ssize_t timestamp0_store(struct device *dev,
@@ -416,7 +533,7 @@ static const struct attribute_group pcf2127_attr_group = {
};
static int pcf2127_probe(struct device *dev, struct regmap *regmap,
- const char *name, bool has_nvmem)
+ int alarm_irq, const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
u32 wdd_timeout;
@@ -440,6 +557,23 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
+ pcf2127->rtc->uie_unsupported = 1;
+
+ if (alarm_irq >= 0) {
+ ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
+ pcf2127_rtc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "failed to request alarm irq\n");
+ return ret;
+ }
+ }
+
+ if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) {
+ device_init_wakeup(dev, true);
+ pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
+ }
pcf2127->wdd.parent = dev;
pcf2127->wdd.info = &pcf2127_wdt_info;
@@ -553,6 +687,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
static const struct of_device_id pcf2127_of_match[] = {
{ .compatible = "nxp,pcf2127" },
{ .compatible = "nxp,pcf2129" },
+ { .compatible = "nxp,pca2129" },
{}
};
MODULE_DEVICE_TABLE(of, pcf2127_of_match);
@@ -657,13 +792,14 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- return pcf2127_probe(&client->dev, regmap,
+ return pcf2127_probe(&client->dev, regmap, client->irq,
pcf2127_i2c_driver.driver.name, id->driver_data);
}
static const struct i2c_device_id pcf2127_i2c_id[] = {
{ "pcf2127", 1 },
{ "pcf2129", 0 },
+ { "pca2129", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
@@ -722,13 +858,15 @@ static int pcf2127_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name,
+ return pcf2127_probe(&spi->dev, regmap, spi->irq,
+ pcf2127_spi_driver.driver.name,
spi_get_device_id(spi)->driver_data);
}
static const struct spi_device_id pcf2127_spi_id[] = {
{ "pcf2127", 1 },
{ "pcf2129", 0 },
+ { "pca2129", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
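With read_alarm/set_alarm/alarm_irq_enable wired up above, the standard RTC character-device interface can arm the alarm from userspace. A small sketch, assuming the chip shows up as /dev/rtc0; the crude "+1 minute" arithmetic ignores hour rollover and is only for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time now;
	struct rtc_wkalrm alarm = { .enabled = 1 };
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, RTC_RD_TIME, &now);
	alarm.time = now;
	alarm.time.tm_min = (now.tm_min + 1) % 60;	/* crude: +1 minute */
	if (ioctl(fd, RTC_WKALM_SET, &alarm) < 0)
		perror("RTC_WKALM_SET");
	else if (read(fd, &data, sizeof(data)) > 0)	/* blocks until RTC_AF */
		printf("alarm fired (event count %lu)\n", data >> 8);
	close(fd);
	return 0;
}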
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 7a87f461bec8..f8b99cb72959 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -21,8 +21,8 @@
/*
* Information for this driver was pulled from the following datasheets.
*
- * http://www.nxp.com/documents/data_sheet/PCF85063A.pdf
- * http://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
+ * https://www.nxp.com/documents/data_sheet/PCF85063A.pdf
+ * https://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
*
* PCF85063A -- Rev. 6 — 18 November 2015
* PCF85063TP -- Rev. 4 — 6 May 2015
@@ -353,7 +353,7 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063,
default:
dev_warn(&pcf85063->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000",
load);
- /* fall through */
+ fallthrough;
case 7000:
break;
case 12500:
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 47e0f411dd5c..57d351dfe272 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -108,7 +108,7 @@ static int pcf8523_load_capacitance(struct i2c_client *client)
default:
dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
load);
- /* fall through */
+ fallthrough;
case 12500:
value |= REG_CONTROL1_CAP_SEL;
break;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 40d7450a1ce4..c6b89273feba 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -275,6 +275,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct pl031_local *ldata = dev_get_drvdata(dev);
writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR);
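+	/* Apply the caller's enable flag instead of leaving the IRQ state unchanged. */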
+ pl031_alarm_irq_enable(dev, alarm->enabled);
return 0;
}
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index c9bc3d4a1e66..0a969af80af7 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -331,7 +331,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
default:
dev_warn(&pdev->dev,
"invalid crystal-freq specified in device-tree. Assuming no crystal\n");
- /* fall-through */
+ fallthrough;
case 0:
/* keep XTAL on in low-power mode */
pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 94edbb33d0d1..aca022239b33 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
+ /*
+	 * The loop might take a long time on platforms with lots of
+ * known devices. Allow scheduling here.
+ */
+ cond_resched();
}
return 0;
}
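The cond_resched() added above is the usual remedy for long, process-context loops on non-preemptible kernels. A generic sketch of the pattern; struct work_item and the per-item work are placeholders, not css code:

#include <linux/list.h>
#include <linux/sched.h>

struct work_item {
	struct list_head node;
	/* ... payload ... */
};

static void process_all(struct list_head *items)
{
	struct work_item *it;

	list_for_each_entry(it, items, node) {
		/* per-item work goes here (placeholder) */
		cond_resched();	/* may sleep: process context only */
	}
}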
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index d5880f52dc2b..5896e5282a4e 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -818,7 +818,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
- int rc = EINVAL;
+ int rc;
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
@@ -886,7 +886,7 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
- int rc = -EINVAL;
+ int rc;
u32 _nr_apqns, *_apqns = NULL;
if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 3ce99e4db44d..661d2a49bce9 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1695,7 +1695,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
grp->changed_side = 2;
break;
}
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index ab316baa8284..85a1a4533cbe 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -357,7 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
if (callback)
grp->send_qllc_disc = 1;
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
@@ -1470,7 +1470,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
break;
- /* Else, fall through */
+ fallthrough;
default:
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
@@ -2089,7 +2089,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
grp->estconnfunc = NULL;
break;
}
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bba1b54b8aa3..6a7398251423 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1071,7 +1071,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
break;
case -EIO:
qeth_schedule_recovery(card);
- /* fall through */
+ fallthrough;
default:
qeth_clear_ipacmd_list(card);
goto err_idx;
@@ -2886,7 +2886,7 @@ void qeth_print_status_message(struct qeth_card *card)
card->info.mcl_level[3]);
break;
}
- /* fallthrough */
+ fallthrough;
case QETH_CARD_TYPE_IQD:
if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index ebdc03210608..f870c5322bfe 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -356,7 +356,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10000baseT_Full);
- /* fall through */
+ fallthrough;
case SPEED_1000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
@@ -366,7 +366,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
1000baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
1000baseT_Half);
- /* fall through */
+ fallthrough;
case SPEED_100:
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
@@ -376,7 +376,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
100baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Half);
- /* fall through */
+ fallthrough;
case SPEED_10:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10baseT_Full);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8b342a88ff5c..3a94f6cad167 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -488,7 +488,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
kfree(mac);
break;
}
- /* fall through */
+ fallthrough;
default:
/* for next call to set_rx_mode(): */
mac->disp_flag = QETH_DISP_ADDR_DELETE;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fe44b0249e34..4d461960370d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1235,7 +1235,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
break;
}
addr->ref_counter = 1;
- /* fall through */
+ fallthrough;
default:
/* for next call to set_rx_mode(): */
addr->disp_flag = QETH_DISP_ADDR_DELETE;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c795f22249d8..140186fe1d1e 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -434,7 +434,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
return;
}
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
zfcp_fsf_protstatus_eval(req);
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
@@ -867,7 +867,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
/* lookup request again, list might have changed */
zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
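Both zfcp hunks switch del_timer() to del_timer_sync(): the latter not only removes a pending timer but also waits for a handler that is already running on another CPU. A generic sketch of why that matters on teardown paths; struct foo is hypothetical, not zfcp code:

#include <linux/slab.h>
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	/* ... state the timer callback touches ... */
};

static void foo_destroy(struct foo *foo)
{
	/*
	 * del_timer() would return while the callback may still be running
	 * elsewhere; del_timer_sync() waits for it, so freeing foo cannot
	 * race with the callback. Must not hold a lock the callback takes.
	 */
	del_timer_sync(&foo->timer);
	kfree(foo);
}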
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 461b3babb601..84b57a8f86bf 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1832,7 +1832,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
case REQUEST_SENSE:
/* clear the internal sense magic */
SCp->cmnd[6] = 0;
- /* fall through */
+ fallthrough;
default:
/* OK, get it from the command */
switch(SCp->sc_data_direction) {
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index bb49d83cadc7..ccb061ab0a0a 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2635,7 +2635,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
case BLOGIC_BAD_CMD_PARAM:
blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
adapter, adapter_status);
- /* fall through */
+ fallthrough;
case BLOGIC_DATA_UNDERRUN:
case BLOGIC_DATA_OVERRUN:
case BLOGIC_NOEXPECT_BUSFREE:
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 0f17bd51088a..24ace1824048 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1034,11 +1034,14 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
temp6 >>= 1;
switch (temp & 0x3) {
case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */
- temp6 |= 0x8000; /* Fall through */
+ temp6 |= 0x8000;
+ fallthrough;
case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */
- temp5 |= 0x8000; /* Fall through */
+ temp5 |= 0x8000;
+ fallthrough;
case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */
- temp2 |= 0x8000; /* Fall through */
+ temp2 |= 0x8000;
+ fallthrough;
case AUTO_RATE_00: /* Asynchronous */
break;
}
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f2f7e6e76c07..d654a6cc4162 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1943,7 +1943,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
return;
/* Reject message */
- /* Fall through */
+ fallthrough;
default:
/*
* If we get something weird that we aren't expecting,
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 769af4ca9ca9..fd6ae5c38086 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2809,7 +2809,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
@@ -2884,7 +2884,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Issue FIB to tell Firmware to flush it's cache */
if ((aac_cache & 6) != 2)
return aac_synchronize(scsicmd);
- /* fall through */
+ fallthrough;
case INQUIRY:
{
struct inquiry_data inq_data;
@@ -3240,7 +3240,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
SCSI_SENSE_BUFFERSIZE));
break;
}
- /* fall through */
+ fallthrough;
case RESERVE:
case RELEASE:
case REZERO_UNIT:
@@ -3253,7 +3253,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case START_STOP:
return aac_start_stop(scsicmd);
- /* FALLTHRU */
+ fallthrough;
default:
/*
* Unhandled commands
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index adbdc3b7c7a7..383e74fea6ed 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1431,7 +1431,7 @@ retry_next:
"enclosure services event");
scsi_device_set_state(device, SDEV_RUNNING);
}
- /* FALLTHRU */
+ fallthrough;
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 8588da0a0655..a3aee146537b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -765,7 +765,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
!(aac->raw_io_64) ||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index c912d29b8bdf..1c617c0d5899 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2274,7 +2274,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
switch (scb->hscb->task_management) {
case SIU_TASKMGMT_ABORT_TASK:
tag = SCB_GET_TAG(scb);
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_ABORT_TASK_SET:
case SIU_TASKMGMT_CLEAR_TASK_SET:
lun = scb->hscb->lun;
@@ -2285,7 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
break;
case SIU_TASKMGMT_LUN_RESET:
lun = scb->hscb->lun;
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_TARGET_RESET:
{
struct ahd_devinfo devinfo;
@@ -3791,7 +3791,7 @@ ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
@@ -5104,7 +5104,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
case MSG_MESSAGE_REJECT:
response = ahd_handle_msg_reject(ahd, devinfo);
- /* FALLTHROUGH */
+ fallthrough;
case MSG_NOOP:
done = MSGLOOP_MSGCOMPLETE;
break;
@@ -5454,7 +5454,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
- /* FALLTHROUGH */
+ fallthrough;
case MSG_TERM_IO_PROC:
default:
reject = TRUE;
@@ -6117,17 +6117,17 @@ ahd_free(struct ahd_softc *ahd)
default:
case 5:
ahd_shutdown(ahd);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
ahd->shared_data_map.dmamap);
ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
case 1:
@@ -6513,7 +6513,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
}
ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
{
struct map_node *sg_map;
@@ -6528,7 +6528,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
}
ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 5:
{
struct map_node *hscb_map;
@@ -7171,7 +7171,7 @@ ahd_init(struct ahd_softc *ahd)
case FLX_CSTAT_OVER:
case FLX_CSTAT_UNDER:
warn_user++;
- /* fall through */
+ fallthrough;
case FLX_CSTAT_INVALID:
case FLX_CSTAT_OKAY:
if (warn_user == 0 && bootverbose == 0)
@@ -8175,12 +8175,12 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in qinfifo\n");
ahd_done_with_status(ahd, scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
break;
case SEARCH_PRINT:
printk(" 0x%x", ahd->qinfifo[qinpos]);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
ahd_qinfifo_requeue(ahd, prev_scb, scb);
prev_scb = scb;
@@ -8271,7 +8271,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB pending MK_MSG\n");
ahd_done_with_status(ahd, mk_msg_scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
{
u_int tail_offset;
@@ -8295,7 +8295,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
}
case SEARCH_PRINT:
printk(" 0x%x", SCB_GET_TAG(scb));
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
break;
}
@@ -8376,7 +8376,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in Waiting List\n");
ahd_done_with_status(ahd, scb, status);
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
ahd_rem_wscb(ahd, scbid, prev, next, tid);
*list_tail = prev;
@@ -8385,7 +8385,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
break;
case SEARCH_PRINT:
printk("0x%x ", scbid);
- /* fall through */
+ fallthrough;
case SEARCH_COUNT:
prev = scbid;
break;
@@ -9023,7 +9023,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
case SCSI_STATUS_OK:
printk("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
- /* FALLTHROUGH */
+ fallthrough;
default:
ahd_done(ahd, scb);
break;
@@ -9512,7 +9512,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
fmt3_ins = &instr.format3;
fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -9523,7 +9523,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
}
fmt1_ins->parity = 0;
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
{
int i, count;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index d019e3f2bb9b..7c321303969e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -2035,7 +2035,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
break;
case CAM_AUTOSENSE_FAIL:
new_status = DID_ERROR;
- /* Fallthrough */
+ fallthrough;
case CAM_SCSI_STATUS_ERROR:
scsi_status = ahd_cmd_get_scsi_status(cmd);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 3d4df906fa4f..2231c4afa531 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2404,7 +2404,7 @@ ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
@@ -3599,7 +3599,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
case MSG_MESSAGE_REJECT:
response = ahc_handle_msg_reject(ahc, devinfo);
- /* FALLTHROUGH */
+ fallthrough;
case MSG_NOOP:
done = MSGLOOP_MSGCOMPLETE;
break;
@@ -4465,17 +4465,17 @@ ahc_free(struct ahc_softc *ahc)
default:
case 5:
ahc_shutdown(ahc);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
ahc->shared_data_dmamap);
ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1:
@@ -4893,30 +4893,30 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
}
ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
ahc_dmamap_unload(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 5:
ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
scb_data->sense_dmamap);
ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 4:
ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
- /* fall through */
+ fallthrough;
case 3:
ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 2:
ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
scb_data->hscb_dmamap);
ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 1:
ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
break;
@@ -5981,7 +5981,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
printk("Inactive SCB in Waiting List\n");
ahc_done(ahc, scb);
}
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
next = ahc_rem_wscb(ahc, next, prev);
break;
@@ -6987,7 +6987,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
address -= address_offset;
fmt3_ins->address = address;
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -7013,7 +7013,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
fmt1_ins->opcode = AIC_OP_AND;
fmt1_ins->immediate = 0xff;
}
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
if ((ahc->features & AHC_ULTRA2) != 0) {
int i, count;
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index c264b4b56970..e2d880a5f391 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -706,11 +706,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
@@ -718,7 +718,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
default:
@@ -730,7 +730,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -789,7 +789,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
- /* fall through */
+ fallthrough;
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 1fcee65193a3..0eb6e206a2b4 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -490,7 +490,7 @@ int asd_abort_task(struct sas_task *task)
switch (tcs.dl_opcode) {
default:
res = asd_clear_nexus(task);
- /* fallthrough */
+ fallthrough;
case TC_NO_ERROR:
break;
/* The task hasn't been sent to the device xor
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index fa562a085600..ec895d0319f0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -4470,7 +4470,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1202:
case PCI_DEVICE_ID_ARECA_1210:
raid6 = 0;
- /*FALLTHRU*/
+ fallthrough;
case PCI_DEVICE_ID_ARECA_1120:
case PCI_DEVICE_ID_ARECA_1130:
case PCI_DEVICE_ID_ARECA_1160:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 6c68c2303638..2e687ce60753 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -603,7 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg)
msgqueue_flush(&info->scsi.msgs);
msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
info->scsi.phase = PHASE_MSGOUT_EXPECT;
- /* fall through */
+ fallthrough;
case async:
dev->period = info->ifcfg.asyncperiod / 4;
@@ -916,7 +916,7 @@ static void fas216_disconnect_intr(FAS216_Info *info)
fas216_done(info, DID_ABORT);
break;
}
- /* else, fall through */
+ fallthrough;
default: /* huh? */
printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
@@ -1413,7 +1413,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
@@ -1426,7 +1426,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
@@ -1581,7 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned
fas216_message(info);
break;
}
- /* else, fall through */
+ fallthrough;
default:
fas216_log(info, 0, "internal phase %s for function done?"
@@ -1964,7 +1964,7 @@ static void fas216_kick(FAS216_Info *info)
switch (where_from) {
case TYPE_QUEUE:
fas216_allocate_tag(info, SCpnt);
- /* fall through */
+ fallthrough;
case TYPE_OTHER:
fas216_start_command(info, SCpnt);
break;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 93da6344424d..a13c203ef7a9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -677,7 +677,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
- /* fall through */
+ fallthrough;
default:
return 0;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 8dc2e0824ad7..5c3513a4b450 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1532,7 +1532,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
- /* fall through */
+ fallthrough;
case UNSOL_DATA_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].data;
break;
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 29f99561dfc3..38d1c453074d 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -2572,7 +2572,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
case FCP_IODIR_RW:
bfa_stats(itnim, input_reqs);
bfa_stats(itnim, output_reqs);
- /* fall through */
+ fallthrough;
default:
bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
}
@@ -2807,7 +2807,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
case BFI_IOIM_STS_TIMEDOUT:
bfa_stats(ioim->itnim, iocomp_timedout);
- /* fall through */
+ fallthrough;
case BFI_IOIM_STS_ABORTED:
rsp->io_status = BFI_IOIM_STS_ABORTED;
bfa_stats(ioim->itnim, iocomp_aborted);
@@ -3203,7 +3203,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /* fall through */
+ fallthrough;
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
bfa_tskim_send_abort(tskim);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 297a77f5806c..3486e402bfc1 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -6422,7 +6422,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
@@ -6448,7 +6448,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 143c35bd668c..c21aa37b8adb 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -419,13 +419,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
rport->plogi_retries++;
@@ -856,7 +856,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
* At least go offline when a PLOGI is received.
*/
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
@@ -1042,7 +1042,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1131,7 +1131,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
}
- /* fall through */
+ fallthrough;
case RPSM_EVENT_ADDRESS_CHANGE:
if (!bfa_fcs_lport_is_online(rport->port)) {
@@ -1288,7 +1288,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1332,7 +1332,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index dd5821dfcac2..325ad8a592bb 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -969,7 +969,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
@@ -1045,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
@@ -5988,7 +5988,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
- /* fall through */
+ fallthrough;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 1e266c1ef793..11c0c3e6f014 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -6397,7 +6397,7 @@ bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
dport->test_state = BFA_DPORT_ST_INP;
bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
}
- /* fall thru */
+ fallthrough;
case BFA_DPORT_SM_REQFAIL:
bfa_sm_set_state(dport, bfa_dport_sm_enabled);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e72d7bb7f4f4..08992095ce7a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1404,7 +1404,6 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
break;
case FCOE_KCQE_OPCODE_FCOE_ERROR:
- /* fall thru */
default:
printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 98d4d39aaa57..7fa20609d5e7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2939,7 +2939,7 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
case CSIO_HWE_FW_DLOAD:
csio_set_state(&hw->sm, csio_hws_resetting);
/* Download firmware */
- /* Fall through */
+ fallthrough;
case CSIO_HWE_HBA_RESET:
csio_set_state(&hw->sm, csio_hws_resetting);
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 61cf54208451..dc98f51f466f 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1187,7 +1187,6 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
break;
case CSIO_LNE_LINK_DOWN:
- /* Fall through */
case CSIO_LNE_DOWN_LINK:
csio_set_state(&ln->sm, csio_lns_uninit);
if (csio_is_phys_ln(ln)) {
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 0ca695110f54..9010cb6045dc 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -808,7 +808,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
csio_q_eqid(hw, i) = CSIO_MAX_QID;
}
- /* fall through */
+ fallthrough;
case CSIO_INGRESS:
if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_iq_ftr(hw, i);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 2b48954b6b1e..37d99357120f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -643,7 +643,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
case CPL_ERR_XMIT_TIMEDOUT:
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4e82c14cb795..2c3491528d42 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1133,7 +1133,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ?
-EPIPE : -ECONNRESET;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 71aebaf533ea..0e8621a6956d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2457,10 +2457,10 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return err;
}
- __kfree_skb(skb);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 94250ebe9e80..e72440d919d2 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -748,16 +748,16 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_TWO:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_ONE:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- /* fall through */
+ fallthrough;
case FREE_IRQ:
cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- /* fall through */
+ fallthrough;
case UNDO_NOOP:
/* No action required */
break;
@@ -971,18 +971,18 @@ static void cxlflash_remove(struct pci_dev *pdev)
switch (cfg->init_state) {
case INIT_STATE_CDEV:
cxlflash_release_chrdev(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
- /* fall through */
+ fallthrough;
case INIT_STATE_AFU:
term_afu(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_PCI:
cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
- /* fall through */
+ fallthrough;
case INIT_STATE_NONE:
free_mem(cfg);
scsi_host_put(cfg->host);
@@ -2355,11 +2355,11 @@ retry:
cxlflash_schedule_async_reset(cfg);
break;
}
- /* fall through - to retry */
+ fallthrough; /* to retry */
case -EAGAIN:
if (++nretry < 2)
goto retry;
- /* fall through - to exit */
+ fallthrough; /* to exit */
default:
break;
}
@@ -2533,12 +2533,12 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
cfg->state = STATE_NORMAL;
wake_up_all(&cfg->reset_waitq);
ssleep(1);
- /* fall through */
+ fallthrough;
case STATE_RESET:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
break;
- /* fall through */
+ fallthrough;
default:
rc = FAILED;
break;
@@ -3019,7 +3019,7 @@ retry:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
goto retry;
- /* else, fall through */
+ fallthrough;
default:
/* Ideally should not happen */
dev_err(dev, "%s: Device is not ready, state=%d\n",
@@ -3531,7 +3531,7 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto out;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 593669ac3669..5dddf67dfa24 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -375,14 +375,13 @@ retry:
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
result &= ~SAM_STAT_CHECK_CONDITION;
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device capacity changed */
case 0x3F: /* Report LUNs changed */
/* Retry the command once more */
@@ -1791,13 +1790,12 @@ static int process_sense(struct scsi_device *sdev,
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device settings/capacity changed */
rc = read_cap16(sdev, lli);
if (rc) {
@@ -2157,7 +2155,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
if (unlikely(rc))
goto cxlflash_ioctl_exit;
- /* fall through */
+ fallthrough;
case DK_CXLFLASH_MANAGE_LUN:
known_ioctl = true;
@@ -2168,7 +2166,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto cxlflash_ioctl_exit;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 8acd4bb9fefb..4a3f7831a2d6 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -60,7 +60,7 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
ret = SCSI_DH_OK;
break;
}
- /* Fallthrough */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
@@ -147,7 +147,7 @@ retry:
rc = SCSI_DH_RETRY;
break;
}
- /* fall through */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed, "
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index b02ac389e6c6..429d64299fe9 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1500,7 +1500,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
}
- /* fall through */
+ fallthrough;
case FI_ACT_UP: /* Upload the components */
default:
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index eb7d139ffc00..09c5c24bf391 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -1236,7 +1236,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
break;
}
- /* fall through */
+ fallthrough;
case ESAS2R_INIT_MSG_GET_INIT:
if (msg == ESAS2R_INIT_MSG_GET_INIT) {
@@ -1250,7 +1250,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
esas2r_hdebug("FAILED");
}
}
- /* fall through */
+ fallthrough;
default:
rq->req_stat = RS_SUCCESS;
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 89afa31e33cb..43a1fd11df5e 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -307,7 +307,7 @@ static void esp_reset_esp(struct esp *esp)
case FASHME:
esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
- /* fallthrough... */
+ fallthrough;
case FAS236:
case PCSCSI:
@@ -1741,7 +1741,7 @@ again:
case ESP_EVENT_DATA_IN:
write = 1;
- /* fallthru */
+ fallthrough;
case ESP_EVENT_DATA_OUT: {
struct esp_cmd_entry *ent = esp->active_cmd;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 85c7959961cc..5ea426effa60 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -256,9 +256,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
WARN_ON(!fcf_dev);
new->fcf_dev = NULL;
fcoe_fcf_device_delete(fcf_dev);
- kfree(new);
mutex_unlock(&cdev->lock);
}
+ kfree(new);
}
/**
@@ -450,10 +450,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
switch (fip->mode) {
default:
LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
- /* fall-through */
+ fallthrough;
case FIP_MODE_AUTO:
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
- /* fall-through */
+ fallthrough;
case FIP_MODE_FABRIC:
case FIP_MODE_NON_FIP:
mutex_unlock(&fip->ctlr_mutex);
@@ -773,7 +773,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
fc_fcoe_set_mac(mac, fh->fh_d_id);
fip->update_mac(lport, mac);
}
- /* fall through */
+ fallthrough;
case ELS_LS_RJT:
op = fr_encaps(fp);
if (op)
@@ -2439,7 +2439,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
frport->enode_mac, 0);
break;
}
- /* fall through */
+ fallthrough;
case FIP_ST_VNMP_START:
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"restart VN2VN negotiation\n");
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 2cc676e3df6a..29e4cdcade72 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -340,7 +340,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
case BOARD_DTC3181E:
hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
+ fallthrough;
case BOARD_NCR53C400A:
case BOARD_HP_C2502:
hostdata->c400_ctl_status = 9;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 11caa4b0d797..d9d21d23372e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1144,7 +1144,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
hisi_hba->hw->get_events(hisi_hba, phy_no);
break;
}
- /* fallthru */
+ fallthrough;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 91794a50b31f..48d5da59262b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4697,7 +4697,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
case WRITE_6:
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
case READ_12:
if (*cdb_len == 6) {
@@ -5147,7 +5147,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
switch (cmd->cmnd[0]) {
case WRITE_6:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
(cmd->cmnd[2] << 8) |
@@ -5158,7 +5158,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_10:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_10:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5171,7 +5171,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_12:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5186,7 +5186,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_16:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_16:
first_block =
(((u64) cmd->cmnd[2]) << 56) |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 77f4d37d5bd6..ea7c8930592d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1866,7 +1866,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
(bsg_request->rqst_data.h_els.port_id[1] << 8) |
bsg_request->rqst_data.h_els.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
@@ -1875,7 +1875,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
bsg_request->rqst_data.h_ct.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -4122,7 +4122,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
- /* fall through */
+ fallthrough;
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index d9e94e81da01..cc3908c2d2f9 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1581,7 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
case H_PERMISSION:
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
- /* Fall through */
+ fallthrough;
default:
dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
rc);
@@ -2489,10 +2489,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
break;
case H_CLOSED:
vscsi->flags |= CLIENT_FAILED;
- /* Fall through */
+ fallthrough;
case H_DROPPED:
vscsi->flags |= RESPONSE_Q_DOWN;
- /* Fall through */
+ fallthrough;
case H_REMOTE_PARM:
dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
rc);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 1459b1467027..862d35a098cf 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -801,7 +801,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!imm_select(dev, scmd_id(cmd))) {
@@ -809,7 +809,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -819,7 +819,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (!imm_send_command(cmd))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -835,7 +835,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
cmd->SCp.phase++;
if (cmd->SCp.this_residual & 0x01)
cmd->SCp.this_residual++;
- /* fall through */
+ fallthrough;
case 5: /* Phase 5 - Pre-Data transfer stage */
/* Spin lock for BUSY */
@@ -852,7 +852,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (imm_negotiate(dev))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 6: /* Phase 6 - Data transfer stage */
/* Spin lock for BUSY */
@@ -868,7 +868,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 1;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 7: /* Phase 7 - Post data transfer stage */
if ((dev->dp) && (dev->rd)) {
@@ -880,7 +880,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
}
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 8: /* Phase 8 - Read status/message */
/* Check for data overrun */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 7f9b3f20e5e4..4cacb800b530 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -778,7 +778,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
case SCU_EVENT_LINK_FAILURE:
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
- /* fall through */
+ fallthrough;
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index cd1e4b4d95bb..c3f540b55689 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -310,7 +310,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
/* Kill all outstanding requests for the device. */
sci_remote_device_terminate_requests(idev);
- /* Fall through - into the default case... */
+ fallthrough; /* into the default case */
default:
clear_bit(IDEV_IO_READY, &idev->flags);
break;
@@ -593,7 +593,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
break;
}
- /* fall through - and treat as unhandled... */
+ fallthrough; /* and treat as unhandled */
default:
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 474a43460963..68333f523b35 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -225,7 +225,7 @@ static void sci_remote_node_context_continue_state_transitions(struct sci_remote
case RNC_DEST_READY:
case RNC_DEST_SUSPENDED_RESUME:
rnc->destination_state = RNC_DEST_READY;
- /* Fall through... */
+ fallthrough;
case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
@@ -601,9 +601,9 @@ enum sci_status sci_remote_node_context_suspend(
__func__, sci_rnc);
return SCI_FAILURE_INVALID_STATE;
}
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_RESUMING:
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_POSTING:
/* Set the destination state to AWAIT - this signals the
* entry into the SCI_RNC_READY state that a suspension
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 6561a07db189..6e0817941fa7 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -894,7 +894,7 @@ sci_io_request_terminate(struct isci_request *ireq)
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- /* Fall through - and handle like ABORTING... */
+ fallthrough; /* and handle like ABORTING */
case SCI_REQ_ABORTING:
if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
set_bit(IREQ_PENDING_ABORT, &ireq->flags);
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 19721db23283..d8cbc9c0e766 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
- if (IS_ERR(fp))
- goto redisc;
+ if (IS_ERR(fp)) {
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ goto out;
+ }
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)
@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
new_rdata->disc_id = disc->disc_id;
fc_rport_login(new_rdata);
}
- goto out;
+ goto free_fp;
}
rdata->disc_id = disc->disc_id;
mutex_unlock(&rdata->rp_mutex);
@@ -626,6 +630,8 @@ redisc:
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
+free_fp:
+ fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
if (!IS_ERR(fp))
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16eb3b60ed58..96a2952cf626 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2108,7 +2108,7 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
switch (op) {
case ELS_LS_RJT:
FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
- /* fall through */
+ fallthrough;
case ELS_LS_ACC:
goto cleanup;
default:
@@ -2622,7 +2622,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
case FC_EOF_T:
if (f_ctl & FC_FC_END_SEQ)
skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
- /* fall through */
+ fallthrough;
case FC_EOF_N:
if (fh->fh_type == FC_TYPE_BLS)
fc_exch_recv_bls(ema->mp, fp);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index e11d4f002bd4..7cfeb6886237 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -752,7 +752,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
brp = fc_frame_payload_get(fp, sizeof(*brp));
if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
break;
- /* fall thru */
+ fallthrough;
default:
/*
* we will let the command timeout
@@ -1536,7 +1536,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
"device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
- /* fall through */
+ fallthrough;
case ELS_RJT_UNSUP:
FC_FCP_DBG(fsp, "device does not support REC\n");
rpriv = fsp->rport->dd_data;
@@ -1668,7 +1668,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
- /* fall through */
+ fallthrough;
case -FC_EX_TIMEOUT:
/*
@@ -1830,7 +1830,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
case -FC_EX_CLOSED: /* e.g., link failure */
FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
- /* fall through */
+ fallthrough;
default:
fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index b84dbc316df1..6557fda85c5c 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1578,7 +1578,7 @@ static void fc_lport_timeout(struct work_struct *work)
case LPORT_ST_DPRT:
FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
fc_lport_state(lport));
- /* fall thru */
+ fallthrough;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 18663a82865f..a60b228d13f1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1723,7 +1723,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
kref_put(&rdata->kref, fc_rport_destroy);
goto busy;
}
- /* fall through */
+ fallthrough;
default:
FC_RPORT_DBG(rdata,
"Reject ELS 0x%02x while in state %s\n",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 49c8a1818baf..1e9c3171fa9f 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -248,7 +248,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
hdr_lun = scsilun_to_int(&tmf->lun);
if (hdr_lun != task->sc->device->lun)
return 0;
- /* fall through */
+ fallthrough;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
/*
* Fail all SCSI cmd PDUs
@@ -1674,7 +1674,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
- /* fall through */
+ fallthrough;
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -2239,7 +2239,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
"progress\n");
goto success;
}
- /* fall through */
+ fallthrough;
default:
conn->tmf_state = TMF_INITIAL;
goto failed;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 6ef93c7af954..37e5d4e48c2f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -772,7 +772,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
iscsi_tcp_data_recv_prep(tcp_conn);
return 0;
}
- /* fall through */
+ fallthrough;
case ISCSI_OP_LOGOUT_RSP:
case ISCSI_OP_NOOP_IN:
case ISCSI_OP_SCSI_TMFUNC_RSP:
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 1b93332daa6b..6a521ba7a616 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -324,7 +324,7 @@ static int smp_ata_check_ready(struct ata_link *link)
case SAS_END_DEVICE:
if (ex_phy->attached_sata_dev)
return sas_ata_clear_pending(dev, ex_phy);
- /* fall through */
+ fallthrough;
default:
return -ENODEV;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index daf951b0b3f5..cd7c7d269f6f 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -108,7 +108,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
rphy = NULL;
break;
}
- /* fall through */
+ fallthrough;
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port->port);
break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index b7d1b1ea185d..8d6bcc19359f 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1096,7 +1096,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
} else
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* fallthrough */
+ fallthrough;
case SAS_EDGE_EXPANDER_DEVICE:
child = sas_ex_discover_expander(dev, phy_id);
break;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9e0975e55c27..1bf939818c98 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -622,7 +622,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
- /* fallthrough */
+ fallthrough;
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
pr_notice("task 0x%p is not at LU: I_T recover\n",
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a62c60ca6477..ece6c250ebaf 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -6679,9 +6679,15 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
}
} else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
case LPFC_ASYNC_LINK_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
+ break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
@@ -7406,12 +7412,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
- if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+ int logit = 0;
+
+ if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+ logit = 1;
+ }
+ if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ logit = 1;
+ }
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
phba->cfg_irq_chann = phba->cfg_hdw_queue;
+ logit = 1;
+ }
+ if (logit)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2006 Reducing Queues - CPU limitation: "
+ "IRQ %d HDWQ %d\n",
+ phba->cfg_irq_chann,
+ phba->cfg_hdw_queue);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 1d88fedaf3f0..6f9d648a9b9c 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2494,13 +2494,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
diag_status_reply = (struct diag_status *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3012 Received Run link diag test reply "
"below minimum size (%d): reply_len:%d\n",
- (int)(sizeof(struct fc_bsg_request) +
- sizeof(struct diag_status)),
+ (int)(sizeof(*bsg_reply) +
+ sizeof(*diag_status_reply)),
job->reply_len);
rc = -EINVAL;
goto job_error;
@@ -3418,8 +3417,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job)
event_reply = (struct get_mgmt_rev_reply *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2741 Received GET_DFC_REV reply below "
"minimum size\n");
@@ -5202,8 +5200,8 @@ lpfc_menlo_cmd(struct bsg_job *job)
goto no_dd_data;
}
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+ if (job->reply_len < sizeof(*bsg_reply) +
+ sizeof(struct menlo_response)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2785 Received MENLO_CMD reply below "
"minimum size\n");
@@ -5359,9 +5357,7 @@ lpfc_forced_link_speed(struct bsg_job *job)
forced_reply = (struct forced_link_speed_support_reply *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct forced_link_speed_support_reply)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"0049 Received FORCED_LINK_SPEED reply below "
"minimum size\n");
@@ -5715,8 +5711,7 @@ lpfc_get_trunk_info(struct bsg_job *job)
event_reply = (struct lpfc_trunk_info *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2728 Received GET TRUNK _INFO reply below "
"minimum size\n");
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index dd9f2bf54edd..d0141a23a833 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -713,7 +713,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a GID_FT completing so the gidft_inp counter was
* incremented before the GID_FT was issued to the wire.
*/
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
/*
* Skip processing the NS response
@@ -741,11 +742,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* CT command is being retried */
- vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, type);
if (rc == 0)
goto out;
+ else { /* Unable to send NS cmd */
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
+ }
}
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
@@ -825,7 +829,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
}
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -918,7 +923,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a GID_PT completing so the gidft_inp counter was
* incremented before the GID_PT was issued to the wire.
*/
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
/*
* Skip processing the NS response
@@ -942,11 +948,14 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_ns_retry++;
/* CT command is being retried */
- vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
vport->fc_ns_retry, GID_PT_N_PORT);
if (rc == 0)
goto out;
+ else { /* Unable to send NS cmd */
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
+ }
}
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
@@ -1027,7 +1036,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation);
}
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3192,7 +3202,7 @@ port_out:
case SLI_MGMT_GHAT:
case SLI_MGMT_GRPL:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
@@ -3205,7 +3215,7 @@ port_out:
case SLI_MGMT_GPAT:
case SLI_MGMT_GPAS:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DPRT:
case SLI_MGMT_DPA:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 85d4e4000c25..6aae61d6ee16 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3937,10 +3937,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case LSRJT_UNABLE_TPC:
/* The driver has a VALID PLOGI but the rport has
* rejected the PRLI - can't do it now. Delay
- * for 1 second and try again - don't care about
- * the explanation.
+ * for 1 second and try again.
+ *
+ * However, if explanation is REQ_UNSUPPORTED there's
+ * no point to retry PRLI.
*/
- if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+ if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
+ stat.un.b.lsRjtRsnCodeExp !=
+ LSEXP_REQ_UNSUPPORTED) {
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
@@ -9130,7 +9134,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_put(ndlp);
return;
}
- /* fall through */
+ fallthrough;
default:
/* Try to recover from this error */
if (phba->sli_rev == LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 142a02114479..d32c7e7ab09d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4728,15 +4728,14 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
return 1;
- /* fall through */
+ fallthrough;
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
return 1;
- /* fall through */
+ fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
- /* fall through */
}
} else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
@@ -6055,7 +6054,7 @@ restart_disc:
case LPFC_LINK_UP:
lpfc_issue_clear_la(phba, vport);
- /* fall through */
+ fallthrough;
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c4a7e82d3ff2..c69725999315 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4577,6 +4577,13 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
struct lpfc_hba *phba = vport->phba;
fc_host_supported_speeds(shost) = 0;
+ /*
+ * Avoid reporting supported link speed for FCoE as it can't be
+ * controlled via FCoE.
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE)
+ return;
+
if (phba->lmt & LMT_128Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
if (phba->lmt & LMT_64Gb)
@@ -4910,6 +4917,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_ASYNC_LINK_SPEED_40GBPS:
port_speed = 40000;
break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ port_speed = 100000;
+ break;
default:
port_speed = 0;
}
@@ -8589,7 +8599,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
@@ -8603,7 +8613,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_eq,
phba->sli4_hba.max_cfg_param.max_cq,
phba->sli4_hba.max_cfg_param.max_wq,
- phba->sli4_hba.max_cfg_param.max_rq);
+ phba->sli4_hba.max_cfg_param.max_rq,
+ phba->lmt);
/*
* Calculate queue resources based on how
@@ -8626,7 +8637,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2005 Reducing Queues: "
+ "2005 Reducing Queues - "
+ "FW resource limitation: "
"WQ %d CQ %d EQ %d: min %d: "
"IRQ %d HDWQ %d\n",
phba->sli4_hba.max_cfg_param.max_wq,
@@ -14100,17 +14112,18 @@ lpfc_init(void)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
+ error = -ENOMEM;
lpfc_transport_functions.vport_create = lpfc_vport_create;
lpfc_transport_functions.vport_delete = lpfc_vport_delete;
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
if (lpfc_transport_template == NULL)
- return -ENOMEM;
+ goto unregister;
lpfc_vport_transport_template =
fc_attach_transport(&lpfc_vport_transport_functions);
if (lpfc_vport_transport_template == NULL) {
fc_release_transport(lpfc_transport_template);
- return -ENOMEM;
+ goto unregister;
}
lpfc_nvme_cmd_template();
lpfc_nvmet_cmd_template();
@@ -14136,6 +14149,8 @@ unwind:
cpuhp_failure:
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
+unregister:
+ misc_deregister(&lpfc_mgmt_dev);
return error;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e4c710fe0245..92d6e7b98770 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -464,7 +464,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
break;
- /* fall through */
+ fallthrough;
case NLP_STE_REG_LOGIN_ISSUE:
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
@@ -1745,7 +1745,13 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
}
}
- if (ndlp->nlp_type & NLP_FCP_TARGET) {
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
} else {
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index e5be334d6a11..0c39ed50998c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1225,7 +1225,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
bf_get(lpfc_wcqe_c_xb, wcqe));
- /* fall through */
+ fallthrough;
default:
out_err:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index a4430aeeb04a..d4ade7cdb93a 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2110,7 +2110,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+ if (!wait_for_completion_timeout(&tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6179 Unreg targetport x%px timeout "
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5e802c8b22a9..983eeb0e3d07 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1093,7 +1093,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the error
@@ -1213,7 +1213,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the
@@ -1295,7 +1295,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (op) {
case SCSI_PROT_WRITE_PASS:
rc = BG_ERR_CHECK;
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
@@ -3980,7 +3980,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->cur_iocbq.sli4_lxritag,
0, 0);
}
- /* fall through */
+ fallthrough;
default:
cmd->result = DID_ERROR << 16;
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8582b51b0613..e158cd77d387 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9339,7 +9339,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
*/
if (piocb->iocb_cmpl)
piocb->iocb_cmpl = NULL;
- /*FALLTHROUGH*/
+ fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
@@ -9653,7 +9653,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
cmnd = CMD_XMIT_SEQUENCE64_CR;
if (phba->link_flag & LS_LOOPBACK_MODE)
bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- /* fall through */
+ fallthrough;
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -13630,7 +13630,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2537 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -13650,7 +13650,11 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Handle MDS Loopback frames */
- lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli4_handle_mds_loopback(phba->pport,
+ dma_buf);
+ else
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
break;
}
@@ -13674,7 +13678,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
@@ -14158,7 +14162,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6126 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -14205,7 +14209,7 @@ drop:
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
@@ -15092,7 +15096,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
@@ -15234,7 +15238,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0361 Unsupported CQ count: "
@@ -15245,7 +15249,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
@@ -15413,7 +15417,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3118 Bad CQ count. (%d)\n",
@@ -15422,7 +15426,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest */
+ fallthrough; /* otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15698,7 +15702,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
@@ -16119,7 +16123,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -16256,7 +16260,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -18363,7 +18367,10 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
vport = phba->pport;
/* Handle MDS Loopback frames */
- lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ else
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 1987c6666279..20adec4387f0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.2"
+#define LPFC_DRIVER_VERSION "12.8.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0484ee52ae80..ac406049e7c8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -491,9 +491,9 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
if (adapter->support_random_del && adapter->read_ldidmap )
switch (cmd->cmnd[0]) {
- case READ_6: /* fall through */
- case WRITE_6: /* fall through */
- case READ_10: /* fall through */
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
case WRITE_10:
ldrv_num += 0x80;
}
@@ -852,7 +852,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE: /* Fall through */
+ case RESERVE:
case RELEASE:
/*
@@ -987,7 +987,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -1050,7 +1050,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
epthru->numsgelements = mega_build_sglist(adapter, scb,
&epthru->dataxferaddr, &epthru->dataxferlen);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 19469a2c0ea3..4a27ac869f2e 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1581,7 +1581,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return NULL;
}
- /* Fall through */
+ fallthrough;
case READ_CAPACITY:
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 861f7140f52e..2b7e7b5f38ed 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3522,7 +3522,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
megasas_complete_int_cmd(instance, cmd);
break;
}
- /* fall through */
+ fallthrough;
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 0824410f78f8..883cccb59c2d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3534,7 +3534,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
- /* Fall through - and complete IO */
+ fallthrough; /* and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
atomic_dec(&instance->fw_outstanding);
if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index fd1d03064079..0a9f4e44ab2c 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1457,7 +1457,7 @@ static void cmd_complete(struct mesh_state *ms)
/* huh? we expected a phase mismatch */
ms->n_msgin = 0;
ms->msgphase = msg_in;
- /* fall through */
+ fallthrough;
case msg_in:
/* should have some message bytes in fifo */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1d64524cd863..5730f32496b6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4681,7 +4681,7 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "performance mode: balanced\n");
return;
}
- /* Fall through */
+ fallthrough;
case MPT_PERF_MODE_LATENCY:
/*
* Enable interrupt coalescing on all reply queues
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 43260306668c..7c119b904834 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1002,7 +1002,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
/* drop to default case for posting the request */
}
- /* fall through */
+ fallthrough;
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 08fc4b381056..2e2756d8a49b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5470,7 +5470,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
- /* fall through */
+ fallthrough;
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
@@ -6480,7 +6480,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- /* fall through */
+ fallthrough;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -7208,7 +7208,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
- /* fall through */
+ fallthrough;
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
@@ -10653,7 +10653,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
dev_info(&pdev->dev,
"HBA is in Configurable Secure mode\n");
- /* fall through */
+ fallthrough;
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index d4bd31a75b9d..b2869c5dd7fb 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -650,7 +650,7 @@ static void myrb_bgi_control(struct myrb_hba *cb)
if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
sdev_printk(KERN_INFO, sdev,
"Background Initialization Aborted\n");
- /* Fallthrough */
+ fallthrough;
case MYRB_STATUS_NO_BGI_INPROGRESS:
cb->bgi_status.status = MYRB_BGI_INVALID;
break;
@@ -1528,7 +1528,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_6:
lba = (((scmd->cmnd[1] & 0x1F) << 16) |
(scmd->cmnd[2] << 8) |
@@ -1545,7 +1545,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_10:
case VERIFY: /* 0x2F */
case WRITE_VERIFY: /* 0x2E */
@@ -1562,7 +1562,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_12:
case VERIFY_12: /* 0xAF */
case WRITE_VERIFY_12: /* 0xAE */
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index f88adab3f913..03d70138ad58 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -3640,7 +3640,7 @@ ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len)
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
break;
@@ -3910,14 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np)
np->scsi_mode = SMODE_HVD;
break;
}
- /* fall through */
+ fallthrough;
case 3: /* SYMBIOS controllers report HVD through GPIO3 */
if (INB(nc_gpreg) & 0x08)
break;
- /* fall through */
+ fallthrough;
case 2: /* Set HVD unconditionally */
np->scsi_mode = SMODE_HVD;
- /* fall through */
+ fallthrough;
case 1: /* Trust previous settings for HVD */
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
@@ -4296,7 +4296,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
break;
cp->phys.header.wgoalp = cpu_to_scr(goalp);
cp->phys.header.wlastp = cpu_to_scr(lastp);
- /* fall through */
+ fallthrough;
case DMA_FROM_DEVICE:
goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
if (segments <= MAX_SCATTERL)
@@ -6717,7 +6717,7 @@ void ncr_int_sir (struct ncb *np)
OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
- /* fall through */
+ fallthrough;
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
@@ -6825,7 +6825,7 @@ void ncr_int_sir (struct ncb *np)
*/
OUTB (HS_PRT, HS_BUSY);
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
/*-------------------------------------------------------
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 8655ff1249bb..bc5a623519e7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1113,7 +1113,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
nsp_scsi_done(tmpSC);
return IRQ_HANDLED;
}
- /* fall thru */
+ fallthrough;
default:
if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) {
return IRQ_HANDLED;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 0ae800c5b739..aa41f7ac91cb 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -717,7 +717,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.phase++;
}
- /* fall through */
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!ppa_select(dev, scmd_id(cmd))) {
@@ -725,7 +725,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -735,7 +735,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -749,7 +749,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -762,7 +762,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3f04f2c81366..5ca424df355c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3863,7 +3863,7 @@ void qedf_stag_change_work(struct work_struct *work)
container_of(work, struct qedf_ctx, stag_work.work);
if (!qedf) {
- QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ QEDF_ERR(NULL, "qedf is NULL");
return;
}
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 91eb6901815c..e1d7de63e8f8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -380,5 +380,8 @@ extern int qla24xx_soft_reset(struct qla_hw_data *);
static inline int
ql_mask_match(uint level)
{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
return (level & ql2xextended_error_logging) == level;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8c92af5e4390..1bc090d8a71b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3880,6 +3880,7 @@ struct qla_hw_data {
uint32_t scm_supported_f:1;
/* Enabled in Driver */
uint32_t scm_enabled:1;
+ uint32_t max_req_queue_warned:1;
} flags;
uint16_t max_exchg;
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index df670fba2ab8..b569fd6e96d6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -177,7 +177,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
case CS_TIMEOUT:
rval = QLA_FUNCTION_TIMEOUT;
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -1505,11 +1505,11 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
static uint
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
{
+ uint speeds = 0;
+
if (IS_CNA_CAPABLE(ha))
return FDMI_PORT_SPEED_10GB;
if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
- uint speeds = 0;
-
if (ha->max_supported_speed == 2) {
if (ha->min_supported_speed <= 6)
speeds |= FDMI_PORT_SPEED_64GB;
@@ -1536,9 +1536,16 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
}
return speeds;
}
- if (IS_QLA2031(ha))
- return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB;
+ if (IS_QLA2031(ha)) {
+ if ((ha->pdev->subsystem_vendor == 0x103C) &&
+ (ha->pdev->subsystem_device == 0x8002)) {
+ speeds = FDMI_PORT_SPEED_16GB;
+ } else {
+ speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
if (IS_QLA25XX(ha))
return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
@@ -3436,7 +3443,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
fcport->scan_state = QLA_FCPORT_SCAN;
- fcport->logout_on_delete = 0;
}
}
goto login_logout;
@@ -3532,10 +3538,22 @@ login_logout:
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ bool do_delete = false;
+
+ if (fcport->scan_needed &&
+ fcport->disc_state == DSC_LOGIN_PEND) {
+ /* Cable got disconnected after we sent
+ * a login. Do delete to prevent timeout.
+ */
+ fcport->logout_on_delete = 1;
+ do_delete = true;
+ }
+
fcport->scan_needed = 0;
- if ((qla_dual_mode_enabled(vha) ||
- qla_ini_mode_enabled(vha)) &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
+ if (((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) ||
+ do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
fcport->logout_on_delete = 0;
@@ -3736,6 +3754,18 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
unsigned long flags;
const char *name = sp->name;
+ if (res == QLA_OS_TIMER_EXPIRED) {
+ /* switch is ignoring all commands.
+ * This might be a zone disable behavior.
+ * This means we hit 64s timeout.
+ * 22s GPNFT + 44s Abort = 64s
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: Switch Zone check please .\n",
+ name);
+ qla2x00_mark_all_devices_lost(vha);
+ }
+
/*
* We are in an Interrupt context, queue up this
* sp for GNNFT_DONE work. This will allow all
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 57a2d76aa691..507919d4ab36 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -857,7 +857,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport);
break;
}
- /* fall through */
+ fallthrough;
default:
if (fcport_is_smaller(fcport)) {
/* local adapter is bigger */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e3d2dea0b057..0954fa41911c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2874,7 +2874,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x20eb,
"%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 27bcd346af7c..25e0a1684763 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1580,11 +1580,11 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
- /* fall through */
+ fallthrough;
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
- /* Fallthru */
+ fallthrough;
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
IS_QLA8044(ha))
@@ -2024,8 +2024,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
res = DID_ERROR << 16;
}
}
- ql_dbg(ql_dbg_user, vha, 0x503f,
- "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
+ ql_dbg(ql_dbg_disc, vha, 0x503f,
+ "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
@@ -2188,7 +2188,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* fall through */
+ fallthrough;
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -2368,7 +2368,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
fcport->nvme_flag |= NVME_FLAG_RESETTING;
- /* fall through */
+ fallthrough;
case CS_ABORTED:
case CS_PORT_BUSY:
fd->transferred_length = 0;
@@ -3485,7 +3485,7 @@ process_err:
} else {
qlt_24xx_process_atio_queue(vha, 1);
}
- /* fall through */
+ fallthrough;
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 73883435ab58..226f1428d3e5 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -334,14 +334,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (time_after(jiffies, wait_time))
break;
- /*
- * Check if it's UNLOADING, cause we cannot poll in
- * this case, or else a NULL pointer dereference
- * is triggered.
- */
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
- return QLA_FUNCTION_TIMEOUT;
-
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
@@ -5240,7 +5232,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
@@ -5428,7 +5420,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
@@ -5700,7 +5692,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (mb != NULL) {
@@ -5787,7 +5779,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -5822,7 +5814,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -6014,7 +6006,7 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (IS_QLA8031(ha))
mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -6050,7 +6042,7 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA8031(ha))
mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index fa695a4007f8..90bbc61f361b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -536,6 +536,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
+ if (!priv) {
+ /* nvme association has been torn down */
+ return rval;
+ }
+
fcport = qla_rport->fcport;
if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
@@ -687,7 +692,15 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
tmpl = &qla_nvme_fc_transport;
WARN_ON(vha->nvme_local_port);
- WARN_ON(ha->max_req_queues < 3);
+
+ if (ha->max_req_queues < 3) {
+ if (!ha->flags.max_req_queue_warned)
+ ql_log(ql_log_info, vha, 0x2120,
+ "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
+ __func__, ha->max_req_queues);
+ ha->flags.max_req_queue_warned = 1;
+ return ret;
+ }
qla_nvme_fc_transport.max_hw_queues =
min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9b59f032a569..8da00ba54aec 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2017,6 +2017,11 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
+
+ /* Check if FW supports MQ or not */
+ if (!(ha->fw_attributes & BIT_6))
+ goto mqiobase_exit;
+
if (!ql2xmqsupport || !ql2xnvmeenable ||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
@@ -2829,10 +2834,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
- /* Turn off T10-DIF when FC-NVMe is enabled */
- if (ql2xnvmeenable)
- ql2xenabledif = 0;
-
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index e161c05d7d82..411b8a9ff393 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2457,7 +2457,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x10000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x1f: /* Atmel flash. */
/* 512k sector size. */
@@ -2466,7 +2466,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x80000000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x01: /* AMD flash. */
if (flash_id == 0x38 || flash_id == 0x40 ||
@@ -2499,7 +2499,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x1e000;
break;
}
- /* fall through */
+ fallthrough;
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fbb80a043b4f..2d445bdb2129 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -442,7 +442,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
- /* fall through */
+ fallthrough;
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -1270,7 +1270,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
@@ -4423,7 +4423,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
case QLA_TGT_CLEAR_TS:
case QLA_TGT_ABORT_TS:
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- /* fall through */
+ fallthrough;
case QLA_TGT_CLEAR_ACA:
h = qlt_find_qphint(vha, mcmd->unpacked_lun);
mcmd->qpair = h->qpair;
@@ -5057,7 +5057,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* fall through */
+ fallthrough;
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index bab87e47b238..676778cbc550 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2907,7 +2907,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
chap_tbl.secret_len);
}
}
- /* fall through */
+ fallthrough;
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 3790e8b70bba..48ff7d88af86 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -200,15 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Write mailbox command registers. */
switch (mbox_param[param[0]] >> 4) {
case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
}
@@ -259,15 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Read back output parameters. */
switch (mbox_param[param[0]] & 0xf) {
case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 064ed680c053..1ad7260d4758 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4482,8 +4482,6 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
goto fini;
}
- if (zc == ZC2_IMPLICIT_OPEN)
- zbc_close_zone(devip, zsp);
zbc_open_zone(devip, zsp, true);
fini:
write_unlock(macc_lckp);
@@ -5490,9 +5488,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
u64 d = ktime_get_boottime_ns() - ns_from_boot;
if (kt <= d) { /* elapsed duration >= kt */
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
sqcp->a_cmnd = NULL;
atomic_dec(&devip->num_in_q);
clear_bit(k, sqp->in_use_bm);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (new_sd_dp)
kfree(sd_dp);
/* call scsi_done() from this thread */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 927b1e641842..7d3571a2bd89 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -599,7 +599,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
set_host_byte(scmd, DID_ALLOC_FAILURE);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
@@ -621,7 +621,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
return ADD_TO_MLQUEUE;
else
set_host_byte(scmd, DID_TARGET_FAILURE);
- /* FALLTHROUGH */
+ fallthrough;
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
@@ -734,7 +734,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
switch (status_byte(scmd->result)) {
case GOOD:
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
+ fallthrough;
case COMMAND_TERMINATED:
return SUCCESS;
case CHECK_CONDITION:
@@ -755,7 +755,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
return FAILED;
case QUEUE_FULL:
scsi_handle_queue_full(scmd->device);
- /* fall through */
+ fallthrough;
case BUSY:
return NEEDS_RETRY;
default:
@@ -1302,7 +1302,7 @@ retry_tur:
case NEEDS_RETRY:
if (retry_cnt--)
goto retry_tur;
- /*FALLTHRU*/
+ fallthrough;
case SUCCESS:
return 0;
default:
@@ -1739,7 +1739,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
return 0;
- /* fall through */
+ fallthrough;
case DID_SOFT_ERROR:
return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
@@ -1810,7 +1810,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
set_host_byte(scmd, DID_TIME_OUT);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case DID_NO_CONNECT:
case DID_BAD_TARGET:
/*
@@ -1854,7 +1854,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* lower down
*/
break;
- /* fallthrough */
+ fallthrough;
case DID_BUS_BUSY:
case DID_PARITY:
goto maybe_retry;
@@ -1892,7 +1892,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* the case of trying to send too many commands to a
* tagged queueing device.
*/
- /* FALLTHROUGH */
+ fallthrough;
case BUSY:
/*
* device can't talk to us at the moment. Should only
@@ -1905,7 +1905,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
+ fallthrough;
case COMMAND_TERMINATED:
return SUCCESS;
case TASK_ABORTED:
@@ -2376,22 +2376,22 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
rtn = scsi_try_bus_device_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_TARGET:
rtn = scsi_try_target_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_BUS:
rtn = scsi_try_bus_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_HOST:
rtn = scsi_try_host_reset(scmd);
if (rtn == SUCCESS)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
rtn = FAILED;
break;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 45d04b7b2643..14872c9dc78c 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -117,14 +117,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
case NOT_READY: /* This happens if there is no disc in drive */
if (sdev->removable)
break;
- /* FALLTHROUGH */
+ fallthrough;
case UNIT_ATTENTION:
if (sdev->removable) {
sdev->changed = 1;
result = 0; /* This is no longer considered an error */
break;
}
- /* FALLTHROUGH -- for non-removable media */
+ fallthrough; /* for non-removable media */
default:
sdev_printk(KERN_INFO, sdev,
"ioctl_internal_command return code = %x\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7c6dd6f75190..7affaaf8b98e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -795,7 +795,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
}
if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
return;
- /*FALLTHRU*/
+ fallthrough;
case ACTION_REPREP:
scsi_io_completion_reprep(cmd, q);
break;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index e443dee43bcf..c9abed8429c9 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1526,7 +1526,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
list_add_tail(&rphy->list, &sas_host->rphy_list);
if (identify->device_type == SAS_END_DEVICE &&
(identify->target_port_protocols &
- (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
+ (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA)))
rphy->scsi_target_id = sas_host->next_target_id++;
else if (identify->device_type == SAS_END_DEVICE)
rphy->scsi_target_id = -1;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index acde0ca35769..95018e650f2d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2578,8 +2578,6 @@ sd_print_capacity(struct scsi_disk *sdkp,
sd_printk(KERN_NOTICE, sdkp,
"%u-byte physical blocks\n",
sdkp->physical_block_size);
-
- sd_zbc_print_zones(sdkp);
}
/* called with buffer of length 512 */
@@ -3220,6 +3218,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_config_write_same(sdkp);
kfree(buffer);
+ /*
+ * For a zoned drive, revalidating the zones can be done only once
+ * the gendisk capacity is set. So if this fails, set back the gendisk
+ * capacity to 0.
+ */
+ if (sd_zbc_revalidate_zones(sdkp))
+ set_capacity_revalidate_and_notify(disk, 0, false);
+
out:
return 0;
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 27c0f4e9b1d4..4933e7daf17d 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -75,7 +75,9 @@ struct scsi_disk {
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
+ u32 rev_nr_zones;
u32 zone_blocks;
+ u32 rev_zone_blocks;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
@@ -215,8 +217,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
int sd_zbc_init_disk(struct scsi_disk *sdkp);
void sd_zbc_release_disk(struct scsi_disk *sdkp);
-extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
@@ -242,7 +244,10 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
return 0;
}
-static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+{
+ return 0;
+}
static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op,
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 4717e79bff55..0e94ff056bff 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -634,6 +634,23 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
+static void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+ if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+ return;
+
+ if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks + 1 runt zone\n",
+ sdkp->nr_zones - 1,
+ sdkp->zone_blocks);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks\n",
+ sdkp->nr_zones,
+ sdkp->zone_blocks);
+}
+
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
@@ -641,36 +658,31 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}
-static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp,
- u32 zone_blocks,
- unsigned int nr_zones)
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
struct gendisk *disk = sdkp->disk;
+ struct request_queue *q = disk->queue;
+ u32 zone_blocks = sdkp->rev_zone_blocks;
+ unsigned int nr_zones = sdkp->rev_nr_zones;
+ u32 max_append;
int ret = 0;
+ if (!sd_is_zoned(sdkp))
+ return 0;
+
/*
* Make sure revalidate zones are serialized to ensure exclusive
* updates of the scsi disk data.
*/
mutex_lock(&sdkp->rev_mutex);
- /*
- * Revalidate the disk zones to update the device request queue zone
- * bitmaps and the zone write pointer offset array. Do this only once
- * the device capacity is set on the second revalidate execution for
- * disk scan or if something changed when executing a normal revalidate.
- */
- if (sdkp->first_scan) {
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
- goto unlock;
- }
-
if (sdkp->zone_blocks == zone_blocks &&
sdkp->nr_zones == nr_zones &&
disk->queue->nr_zones == nr_zones)
goto unlock;
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
if (!sdkp->rev_wp_offset) {
ret = -ENOMEM;
@@ -682,6 +694,21 @@ static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp,
kvfree(sdkp->rev_wp_offset);
sdkp->rev_wp_offset = NULL;
+ if (ret) {
+ sdkp->zone_blocks = 0;
+ sdkp->nr_zones = 0;
+ sdkp->capacity = 0;
+ goto unlock;
+ }
+
+ max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
+ q->limits.max_segments << (PAGE_SHIFT - 9));
+ max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
+
+ blk_queue_max_zone_append_sectors(q, max_append);
+
+ sd_zbc_print_zones(sdkp);
+
unlock:
mutex_unlock(&sdkp->rev_mutex);
@@ -694,7 +721,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
- u32 max_append;
int ret;
if (!sd_is_zoned(sdkp))
@@ -728,22 +754,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
sdkp->device->use_16_for_rw = 1;
sdkp->device->use_10_for_rw = 0;
- ret = sd_zbc_revalidate_zones(sdkp, zone_blocks, nr_zones);
- if (ret)
- goto err;
-
- /*
- * On the first scan 'chunk_sectors' isn't setup yet, so calling
- * blk_queue_max_zone_append_sectors() will result in a WARN(). Defer
- * this setting to the second scan.
- */
- if (sdkp->first_scan)
- return 0;
-
- max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
- q->limits.max_segments << (PAGE_SHIFT - 9));
-
- blk_queue_max_zone_append_sectors(q, max_append);
+ sdkp->rev_nr_zones = nr_zones;
+ sdkp->rev_zone_blocks = zone_blocks;
return 0;
@@ -753,23 +765,6 @@ err:
return ret;
}
-void sd_zbc_print_zones(struct scsi_disk *sdkp)
-{
- if (!sd_is_zoned(sdkp) || !sdkp->capacity)
- return;
-
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
- sd_printk(KERN_NOTICE, sdkp,
- "%u zones of %u logical blocks + 1 runt zone\n",
- sdkp->nr_zones - 1,
- sdkp->zone_blocks);
- else
- sd_printk(KERN_NOTICE, sdkp,
- "%u zones of %u logical blocks\n",
- sdkp->nr_zones,
- sdkp->zone_blocks);
-}
-
int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
if (!sd_is_zoned(sdkp))
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index bd38c8cea56e..ca1e6cf6a38e 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -516,7 +516,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SENSE_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
case BMIC_SENSE_SUBSYSTEM_INFORMATION:
@@ -527,7 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SET_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_WRITE_HOST_WELLNESS:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
@@ -2324,7 +2324,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
switch (scmd->cmnd[0]) {
case WRITE_6:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_6:
first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
@@ -2334,21 +2334,21 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
break;
case WRITE_10:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_10:
first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
break;
case WRITE_12:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_12:
first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
break;
case WRITE_16:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_16:
first_block = get_unaligned_be64(&scmd->cmnd[2]);
block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
@@ -2948,7 +2948,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
if (io_request->scmd)
io_request->scmd->result = 0;
- /* fall through */
+ fallthrough;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
case PQI_RESPONSE_IU_VENDOR_GENERAL:
@@ -3115,12 +3115,11 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
switch (reset_status) {
case RESET_INITIATE_DRIVER:
- /* fall through */
case RESET_TIMEDOUT:
dev_info(&ctrl_info->pci_dev->dev,
"resetting controller %u\n", ctrl_info->ctrl_id);
sis_soft_reset(ctrl_info);
- /* fall through */
+ fallthrough;
case RESET_INITIATE_FIRMWARE:
rc = pqi_ofa_ctrl_restart(ctrl_info);
pqi_ofa_free_host_buffer(ctrl_info);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0c4aa4665a2f..3b3a53c6a0de 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -877,10 +877,10 @@ static void get_sectorsize(struct scsi_cd *cd)
case 2340:
case 2352:
sector_size = 2048;
- /* fall through */
+ fallthrough;
case 2048:
cd->capacity *= 4;
- /* fall through */
+ fallthrough;
case 512:
break;
default:
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 87fbc0ea350b..e2e5356a997d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -339,14 +339,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
switch (sense[0] & 0x7f) {
case 0x71:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x70:
s->fixed_format = 1;
s->flags = sense[2] & 0xe0;
break;
case 0x73:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x72:
s->fixed_format = 0;
ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
@@ -2723,7 +2723,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
switch (cmd_in) {
case MTFSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTFSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
@@ -2738,7 +2738,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
break;
case MTBSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTBSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 701b842296f0..2e3fbc2fae97 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -397,12 +397,12 @@ static int sun3scsi_dma_finish(int write_flag)
case CSR_LEFT_3:
*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_2:
*vaddr = (dregs->bpack_hi & 0x00ff);
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_1:
*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 6d7651a7847e..c6db61b61de3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -523,7 +523,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
new = 0;
panic("sym_fw_bind_script: "
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 8410117d5aa4..cc11daa1222b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3059,7 +3059,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb
sym_print_addr(cp->cmd, "%s\n",
s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
}
- /* fall through */
+ fallthrough;
default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
sym_complete_error (np, cp);
break;
@@ -4620,7 +4620,7 @@ static void sym_int_sir(struct sym_hcb *np)
* Negotiation failed.
* Target does not want answer message.
*/
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
sym_nego_default(np, tp, cp);
goto out;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index d37e2a69136a..e13d5351f155 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -695,7 +695,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
data, len);
if (!x)
break;
- /* fall through */
+ fallthrough;
default:
x = sym_read_T93C46_nvram(np, nvram);
break;
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index 46bb905b4d6a..eafe0db98d54 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(dev, "Cannot claim MPHY clock.\n");
goto clk_err;
}
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 29cd017c1aa0..1755dd6b04ae 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -212,7 +212,7 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
ktime_t timeout, time_checked;
u32 val;
- timeout = ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms));
+ timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index bcfbbd0d5c45..5b2bc1a6f922 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -110,7 +110,7 @@ static int ufs_bsg_request(struct bsg_job *job)
goto out;
}
- /* fall through */
+ fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f407b13883ac..5a95a7bfbab0 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -44,11 +44,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
+static int ufs_intel_ehl_init(struct ufs_hba *hba)
+{
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ return 0;
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.link_startup_notify = ufs_intel_link_startup_notify,
};
+static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_ehl_init,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function
@@ -177,8 +189,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 307622284239..1d157ff58d81 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1561,6 +1561,7 @@ unblock_reqs:
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
+ bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
@@ -1592,7 +1593,9 @@ start:
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
+ flush_result = flush_work(&hba->clk_gating.ungate_work);
+ if (hba->clk_gating.is_suspended && !flush_result)
+ goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
@@ -1609,7 +1612,7 @@ start:
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
- /* fallthrough */
+ fallthrough;
case CLKS_OFF:
ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
@@ -1621,7 +1624,7 @@ start:
* fall through to check if we should wait for this
* work to be done or not.
*/
- /* fallthrough */
+ fallthrough;
case REQ_CLKS_ON:
if (async) {
rc = -EAGAIN;
@@ -4734,7 +4737,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
ufshcd_copy_sense_data(lrbp);
- /* fallthrough */
+ fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 |
COMMAND_COMPLETE << 8 |
@@ -5941,7 +5944,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status;
+ u32 intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
@@ -5955,7 +5958,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
* read, make sure we handle them by checking the interrupt status
* again in a loop until we process all of the reqs before returning.
*/
- do {
+ while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
@@ -5964,9 +5967,9 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- } while (intr_status && --retries);
+ }
- if (retval == IRQ_NONE) {
+ if (enabled_intr_status && retval == IRQ_NONE) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
__func__, intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
@@ -6274,7 +6277,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
cmd_type = DEV_CMD_TYPE_NOP;
- /* fall through */
+ fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
@@ -6434,14 +6437,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto out;
}
- if (!(reg & (1 << tag))) {
- dev_err(hba->dev,
- "%s: cmd was completed, but without a notifying intr, tag = %d",
- __func__, tag);
- }
-
/* Print Transfer Request of aborted task */
- dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+ dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
/*
* Print detailed info about aborted request.
@@ -6462,6 +6459,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
hba->req_abort_count++;
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ goto cleanup;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
err = -EIO;
@@ -6492,7 +6496,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
/* command completed already */
dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
__func__, tag);
- goto out;
+ goto cleanup;
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
@@ -6526,6 +6530,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto out;
}
+cleanup:
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index b2ef18f1b746..363589c0bd37 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -520,6 +520,12 @@ enum ufshcd_quirks {
* OCS FATAL ERROR with device error through sense data
*/
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
};
enum ufshcd_caps {
@@ -803,7 +809,8 @@ return true;
static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}
static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8cc003aa4d00..3b1803432090 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -148,7 +148,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
resp->response);
- /* fall through */
+ fallthrough;
case VIRTIO_SCSI_S_FAILURE:
set_host_byte(sc, DID_ERROR);
break;
@@ -754,14 +754,14 @@ static struct scsi_host_template virtscsi_host_template = {
#define virtscsi_config_get(vdev, fld) \
({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val; \
virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
do { \
- typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
} while(0)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 8dbb4db6831a..081f54ab7d86 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -607,7 +607,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
cmd->result = (DRIVER_INVALID << 24);
- /* fall through */
+ fallthrough;
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index f81046f0e68a..87dafbc942d3 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1854,7 +1854,7 @@ round_4(unsigned int x)
case 1: --x;
break;
case 2: ++x;
- /* fall through */
+ fallthrough;
case 3: ++x;
}
return x;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index f0068e96a177..259fc248d06c 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -1111,7 +1111,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's Closing state */
+ fallthrough; /* Missed the backend's Closing state */
case XenbusStateClosing:
scsifront_disconnect(info);
break;
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index eeb028b9cdb3..fd72d9088bdc 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -36,21 +36,6 @@ static void sh_clk_write(int value, struct clk *clk)
iowrite32(value, clk->mapped_reg);
}
-static unsigned int r8(const void __iomem *addr)
-{
- return ioread8(addr);
-}
-
-static unsigned int r16(const void __iomem *addr)
-{
- return ioread16(addr);
-}
-
-static unsigned int r32(const void __iomem *addr)
-{
- return ioread32(addr);
-}
-
static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
@@ -61,11 +46,11 @@ static int sh_clk_mstp_enable(struct clk *clk)
(phys_addr_t)clk->enable_reg + clk->mapped_reg;
if (clk->flags & CLK_ENABLE_REG_8BIT)
- read = r8;
+ read = ioread8;
else if (clk->flags & CLK_ENABLE_REG_16BIT)
- read = r16;
+ read = ioread16;
else
- read = r32;
+ read = ioread32;
for (i = 1000;
(read(mapped_status) & (1 << clk->enable_bit)) && i;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index e19102f46302..b25d0f7dac9e 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -353,7 +353,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nmodem_supported);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 14):
qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
@@ -368,14 +368,14 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
&qcom_socinfo->info.num_defective_parts);
debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.ndefective_parts_array_offset);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 13):
qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nproduct_id);
DEBUGFS_ADD(info, chip_id);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
@@ -392,7 +392,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
debugfs_create_x32("raw_device_number", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_device_num);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 11):
case SOCINFO_VERSION(0, 10):
case SOCINFO_VERSION(0, 9):
@@ -400,12 +400,12 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.foundry_id);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 8):
case SOCINFO_VERSION(0, 7):
DEBUGFS_ADD(info, pmic_model);
DEBUGFS_ADD(info, pmic_die_rev);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 6):
qcom_socinfo->info.hw_plat_subtype =
__le32_to_cpu(info->hw_plat_subtype);
@@ -413,7 +413,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
debugfs_create_u32("hardware_platform_subtype", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat_subtype);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 5):
qcom_socinfo->info.accessory_chip =
__le32_to_cpu(info->accessory_chip);
@@ -421,27 +421,27 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
debugfs_create_u32("accessory_chip", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.accessory_chip);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 4):
qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
debugfs_create_u32("platform_version", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.plat_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 3):
qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
debugfs_create_u32("hardware_platform", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 2):
qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 1):
DEBUGFS_ADD(info, build_id);
break;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 42cf37a0556b..d332e5d9abac 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2229,7 +2229,7 @@ static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
case POST_RATE_CHANGE:
pmc->rate = data->new_rate;
- /* fall through */
+ fallthrough;
case ABORT_RATE_CHANGE:
mutex_unlock(&pmc->powergates_lock);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index c3008e423f59..c6ea760ea5f0 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1017,4 +1017,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
endif # SPI_SLAVE
+config SPI_DYNAMIC
+ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
endif # SPI
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 2f717812c766..03b034c15d2b 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -164,10 +164,10 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
switch (count) {
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
- /* fallthrough */
+ fallthrough;
case 2:
*bs->rx_buf++ = (data >> 8) & 0xff;
- /* fallthrough */
+ fallthrough;
case 1:
*bs->rx_buf++ = (data >> 0) & 0xff;
/* fallthrough - no default */
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 54ad0ac121e5..ee905880769e 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -226,7 +226,7 @@ static void fsl_spi_free_dummy_rx(void)
case 1:
kfree(fsl_dummy_rx);
fsl_dummy_rx = NULL;
- /* fall through */
+ fallthrough;
default:
fsl_dummy_rx_refcnt--;
break;
@@ -294,7 +294,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
switch (mspi->subblock) {
default:
dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
- /* fall through */
+ fallthrough;
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
break;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index bd23c4689b46..127b8bd25831 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -506,7 +506,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev,
"failed to find hwlock id, %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto put_ctlr;
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4c643dfc7fbb..d4b33b358a31 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -13,6 +13,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
@@ -441,7 +442,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
{
u32 div, mbrdiv;
- div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+ /* Ensure spi->clk_rate is even */
+ div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -467,20 +469,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
/**
* stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
* @spi: pointer to the spi controller data structure
+ * @xfer_len: length of the message to be transferred
*/
-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
{
- u32 fthlv, half_fifo;
+ u32 fthlv, half_fifo, packet;
/* data packet should not exceed 1/2 of fifo space */
half_fifo = (spi->fifo_size / 2);
+ /* data_packet should not exceed transfer length */
+ if (half_fifo > xfer_len)
+ packet = xfer_len;
+ else
+ packet = half_fifo;
+
if (spi->cur_bpw <= 8)
- fthlv = half_fifo;
+ fthlv = packet;
else if (spi->cur_bpw <= 16)
- fthlv = half_fifo / 2;
+ fthlv = packet / 2;
else
- fthlv = half_fifo / 4;
+ fthlv = packet / 4;
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
@@ -488,6 +497,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
else
fthlv -= (fthlv % 4); /* multiple of 4 */
+ if (!fthlv)
+ fthlv = 1;
+
return fthlv;
}
@@ -966,13 +978,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
- writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
+ writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
if (end) {
- spi_finalize_current_transfer(master);
stm32h7_spi_disable(spi);
+ spi_finalize_current_transfer(master);
}
return IRQ_HANDLED;
@@ -1393,7 +1405,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
STM32H7_SPI_CFG1_DSIZE;
- spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
fthlv = spi->cur_fthlv - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
@@ -1585,39 +1597,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
unsigned long flags;
unsigned int comm_type;
int nb_words, ret = 0;
+ int mbr;
spin_lock_irqsave(&spi->lock, flags);
- if (spi->cur_bpw != transfer->bits_per_word) {
- spi->cur_bpw = transfer->bits_per_word;
- spi->cfg->set_bpw(spi);
- }
-
- if (spi->cur_speed != transfer->speed_hz) {
- int mbr;
+ spi->cur_xferlen = transfer->len;
- /* Update spi->cur_speed with real clock speed */
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
- spi->cfg->baud_rate_div_min,
- spi->cfg->baud_rate_div_max);
- if (mbr < 0) {
- ret = mbr;
- goto out;
- }
+ spi->cur_bpw = transfer->bits_per_word;
+ spi->cfg->set_bpw(spi);
- transfer->speed_hz = spi->cur_speed;
- stm32_spi_set_mbr(spi, mbr);
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
}
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
+
comm_type = stm32_spi_communication_type(spi_dev, transfer);
- if (spi->cur_comm != comm_type) {
- ret = spi->cfg->set_mode(spi, comm_type);
+ ret = spi->cfg->set_mode(spi, comm_type);
+ if (ret < 0)
+ goto out;
- if (ret < 0)
- goto out;
-
- spi->cur_comm = comm_type;
- }
+ spi->cur_comm = comm_type;
if (spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
@@ -1635,8 +1641,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
goto out;
}
- spi->cur_xferlen = transfer->len;
-
dev_dbg(spi->dev, "transfer communication mode set to %d\n",
spi->cur_comm);
dev_dbg(spi->dev,
@@ -1996,6 +2000,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
return 0;
}
@@ -2007,13 +2013,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
clk_disable_unprepare(spi->clk);
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int stm32_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
return clk_prepare_enable(spi->clk);
}
@@ -2043,10 +2054,23 @@ static int stm32_spi_resume(struct device *dev)
return ret;
ret = spi_master_resume(master);
- if (ret)
+ if (ret) {
clk_disable_unprepare(spi->clk);
+ return ret;
+ }
- return ret;
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ dev_err(dev, "Unable to power device:%d\n", ret);
+ return ret;
+ }
+
+ spi->cfg->config(spi);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
}
#endif
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6626587e77b4..dc12af018350 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
*/
static DEFINE_MUTEX(board_lock);
+/*
+ * Prevents addition of devices with same chip select and
+ * addition of devices below an unregistering controller.
+ */
+static DEFINE_MUTEX(spi_add_lock);
+
/**
* spi_alloc_device - Allocate a new SPI device
* @ctlr: Controller to which device is connected
@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
*/
int spi_add_device(struct spi_device *spi)
{
- static DEFINE_MUTEX(spi_add_lock);
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
+ /* Controller may unregister concurrently */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+ !device_is_registered(&ctlr->dev)) {
+ status = -ENODEV;
+ goto done;
+ }
+
/* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
@@ -2795,6 +2807,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
struct spi_controller *found;
int id = ctlr->bus_num;
+ /* Prevent addition of new devices, unregister existing ones */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_lock(&spi_add_lock);
+
device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
@@ -2815,6 +2831,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
if (found == ctlr)
idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
+
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 823dc99be46f..a8d2525e7af9 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
}
- /* Fall through */
+ fallthrough;
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
}
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 1ca2ac5ef2b8..354486b7ed3a 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -342,7 +342,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
break;
}
- /* fallthrough */
+ fallthrough;
case SSB_DEV_EXTIF:
set_irq(dev, 0);
break;
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index b97a5c32d44a..f49ab1aa2149 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -228,7 +228,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus,
switch (bus->bustype) {
case SSB_BUSTYPE_SSB:
/* Only map the first core for now. */
- /* fallthrough... */
+ fallthrough;
case SSB_BUSTYPE_PCMCIA:
mmio = ioremap(baseaddr, SSB_CORE_SIZE);
break;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 8ea65bef35d2..a4e4eef55f35 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -4984,7 +4984,7 @@ enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) {
return MIPI_PORT1_ID + 1;
}
- /* fall through */
+ fallthrough;
default:
dev_err(isp->dev, "unsupported port: %d\n", port);
return MIPI_PORT0_ID;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index cccc5bfa1057..1b2b2c68025b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -704,14 +704,14 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
return false;
}
- /* fall-through */
+ fallthrough;
case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE:
if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
pipe_id == IA_CSS_PIPE_ID_PREVIEW)
return true;
return false;
- /* fall-through */
+ fallthrough;
case ATOMISP_RUN_MODE_VIDEO:
if (!asd->continuous_mode->val) {
if (pipe_id == IA_CSS_PIPE_ID_VIDEO ||
@@ -720,7 +720,7 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
else
return false;
}
- /* fall through */
+ fallthrough;
case ATOMISP_RUN_MODE_SDV:
if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
pipe_id == IA_CSS_PIPE_ID_VIDEO)
@@ -2765,7 +2765,7 @@ static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd,
if (!atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)) {
return IA_CSS_PIPE_ID_CAPTURE;
}
- /* fall through */
+ fallthrough;
case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
if (asd->yuvpp_mode)
return IA_CSS_PIPE_ID_YUVPP;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index f8d616f08b51..65b0c8a662a0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -1467,7 +1467,6 @@ enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd)
case ATOMISP_RUN_MODE_VIDEO:
return IA_CSS_PIPE_ID_VIDEO;
case ATOMISP_RUN_MODE_STILL_CAPTURE:
- /* fall through */
default:
return IA_CSS_PIPE_ID_CAPTURE;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index a000a1e316f7..0114b040247b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1086,7 +1086,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
case RAW_CAMERA:
dev_dbg(isp->dev, "raw_index: %d\n", raw_index);
raw_index = isp->input_cnt;
- /* fall through */
+ fallthrough;
case SOC_CAMERA:
dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt);
if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 4fb9bfdd2f4c..f13af2329f48 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -660,7 +660,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
break;
}
- /* fall through */
+ fallthrough;
/*
* if dynamic memory pool doesn't exist, need to free
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 54434c2dbaf9..a68cbb4995f0 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -4510,7 +4510,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
#endif
pipe->stop_requested = false;
}
- /* fall through */
+ fallthrough;
case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 24041849384a..6386a3989bfe 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -110,7 +110,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
- /* fall-through */
+ fallthrough;
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index 7e9aad671489..f610fa5b4335 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -112,7 +112,7 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
- /* fall-through */
+ fallthrough;
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index d92fd804488e..21ebf7769696 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -488,7 +488,7 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
passthrough_cycles = incc->cycles;
break;
}
- /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */
+ fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */
default:
burst_size = (image.pix.width & 0xf) ? 8 : 16;
passthrough_bits = 16;
diff --git a/drivers/staging/media/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c
index 6e4df3335b1b..aa3ff67a3cb1 100644
--- a/drivers/staging/media/usbvision/usbvision-i2c.c
+++ b/drivers/staging/media/usbvision/usbvision-i2c.c
@@ -303,13 +303,13 @@ usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr,
switch (len) {
case 4:
buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4);
- /* fall through */
+ fallthrough;
case 3:
buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3);
- /* fall through */
+ fallthrough;
case 2:
buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2);
- /* fall through */
+ fallthrough;
case 1:
buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1);
break;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 30ea37e1a3f5..bd37f2afadea 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -444,7 +444,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
- /* fall through */
+ fallthrough;
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c9689610e186..cd045dc75a58 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3740,7 +3740,7 @@ check_rsp_state:
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
return -ECONNRESET;
- /* fall through */
+ fallthrough;
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
case ISTATE_SEND_NOPIN:
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 036940518bfe..27c85f260459 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -31,7 +31,7 @@ void iscsit_put_transport(struct iscsit_transport *t)
module_put(t->owner);
}
-int iscsit_register_transport(struct iscsit_transport *t)
+void iscsit_register_transport(struct iscsit_transport *t)
{
INIT_LIST_HEAD(&t->t_node);
@@ -40,8 +40,6 @@ int iscsit_register_transport(struct iscsit_transport *t)
mutex_unlock(&transport_mutex);
pr_debug("Registered iSCSI transport: %s\n", t->name);
-
- return 0;
}
EXPORT_SYMBOL(iscsit_register_transport);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8fc88654bff6..5f79ea05f9b8 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -345,7 +345,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
- /* fall through */
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
@@ -354,7 +354,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
- /* fall through */
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f1e81886122d..6e8b8d30938f 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -734,7 +734,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
}
if (!protect)
return TCM_NO_SENSE;
- /* Fallthrough */
+ fallthrough;
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
"PROTECT: 0x%02x\n", cdb[0], protect);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9fb0be0aa620..590eac2df909 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2236,7 +2236,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* fall through */
+ fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2431,7 +2431,7 @@ queue_rsp:
goto queue_full;
break;
}
- /* fall through */
+ fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index e9f0dda5ff92..a7ed56602c6c 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -537,7 +537,7 @@ static void ft_send_work(struct work_struct *work)
case FCP_PTA_ACA:
task_attr = TCM_ACA_TAG;
break;
- case FCP_PTA_SIMPLE: /* Fallthrough */
+ case FCP_PTA_SIMPLE:
default:
task_attr = TCM_SIMPLE_TAG;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a2e710d46432..b668224f906d 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -499,4 +499,15 @@ config SPRD_THERMAL
help
Support for the Spreadtrum thermal sensor driver in the Linux thermal
framework.
+
+config KHADAS_MCU_FAN_THERMAL
+ tristate "Khadas MCU controller FAN cooling support"
+ depends on OF || COMPILE_TEST
+ depends on MFD_KHADAS_MCU
+ select MFD_CORE
+ select REGMAP
+ help
+ If you say yes here you get support for the FAN controlled
+ by the Microcontroller found on the Khadas VIM boards.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index b8d96d26f9ec..b64dd50a6629 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
+obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o
diff --git a/drivers/thermal/khadas_mcu_fan.c b/drivers/thermal/khadas_mcu_fan.c
new file mode 100644
index 000000000000..9eadd2d6413e
--- /dev/null
+++ b/drivers/thermal/khadas_mcu_fan.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Khadas MCU Controlled FAN driver
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/khadas-mcu.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+#define MAX_LEVEL 3
+
+struct khadas_mcu_fan_ctx {
+ struct khadas_mcu *mcu;
+ unsigned int level;
+ struct thermal_cooling_device *cdev;
+};
+
+static int khadas_mcu_fan_set_level(struct khadas_mcu_fan_ctx *ctx,
+ unsigned int level)
+{
+ int ret;
+
+ ret = regmap_write(ctx->mcu->regmap, KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
+ level);
+ if (ret)
+ return ret;
+
+ ctx->level = level;
+
+ return 0;
+}
+
+static int khadas_mcu_fan_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MAX_LEVEL;
+
+ return 0;
+}
+
+static int khadas_mcu_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct khadas_mcu_fan_ctx *ctx = cdev->devdata;
+
+ *state = ctx->level;
+
+ return 0;
+}
+
+static int
+khadas_mcu_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct khadas_mcu_fan_ctx *ctx = cdev->devdata;
+
+ if (state > MAX_LEVEL)
+ return -EINVAL;
+
+ if (state == ctx->level)
+ return 0;
+
+ return khadas_mcu_fan_set_level(ctx, state);
+}
+
+static const struct thermal_cooling_device_ops khadas_mcu_fan_cooling_ops = {
+ .get_max_state = khadas_mcu_fan_get_max_state,
+ .get_cur_state = khadas_mcu_fan_get_cur_state,
+ .set_cur_state = khadas_mcu_fan_set_cur_state,
+};
+
+static int khadas_mcu_fan_probe(struct platform_device *pdev)
+{
+ struct khadas_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
+ struct khadas_mcu_fan_ctx *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->mcu = mcu;
+ platform_set_drvdata(pdev, ctx);
+
+ cdev = devm_thermal_of_cooling_device_register(dev->parent,
+ dev->parent->of_node, "khadas-mcu-fan", ctx,
+ &khadas_mcu_fan_cooling_ops);
+ if (IS_ERR(cdev)) {
+ ret = PTR_ERR(cdev);
+ dev_err(dev, "Failed to register khadas-mcu-fan as cooling device: %d\n",
+ ret);
+ return ret;
+ }
+ ctx->cdev = cdev;
+ thermal_cdev_update(cdev);
+
+ return 0;
+}
+
+static void khadas_mcu_fan_shutdown(struct platform_device *pdev)
+{
+ struct khadas_mcu_fan_ctx *ctx = platform_get_drvdata(pdev);
+
+ khadas_mcu_fan_set_level(ctx, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int khadas_mcu_fan_suspend(struct device *dev)
+{
+ struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev);
+ unsigned int level_save = ctx->level;
+ int ret;
+
+ ret = khadas_mcu_fan_set_level(ctx, 0);
+ if (ret)
+ return ret;
+
+ ctx->level = level_save;
+
+ return 0;
+}
+
+static int khadas_mcu_fan_resume(struct device *dev)
+{
+ struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return khadas_mcu_fan_set_level(ctx, ctx->level);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(khadas_mcu_fan_pm, khadas_mcu_fan_suspend,
+ khadas_mcu_fan_resume);
+
+static const struct platform_device_id khadas_mcu_fan_id_table[] = {
+ { .name = "khadas-mcu-fan-ctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, khadas_mcu_fan_id_table);
+
+static struct platform_driver khadas_mcu_fan_driver = {
+ .probe = khadas_mcu_fan_probe,
+ .shutdown = khadas_mcu_fan_shutdown,
+ .driver = {
+ .name = "khadas-mcu-fan-ctrl",
+ .pm = &khadas_mcu_fan_pm,
+ },
+ .id_table = khadas_mcu_fan_id_table,
+};
+
+module_platform_driver(khadas_mcu_fan_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Khadas MCU FAN driver");
+MODULE_LICENSE("GPL");
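
One detail worth noting in the new driver's PM hooks: suspend writes level 0 to the MCU to stop the fan but then puts the previously requested value back into ctx->level, so resume can simply replay it. The pattern in isolation (plain C; fan_ctx and write_hw are made-up stand-ins, not the driver's API):

    #include <stdio.h>

    struct fan_ctx {
        unsigned int level;  /* last level requested by the thermal core */
    };

    /* Stand-in for the regmap write to the MCU fan register. */
    static int write_hw(unsigned int level)
    {
        printf("hw <- level %u\n", level);
        return 0;
    }

    static int fan_set_level(struct fan_ctx *ctx, unsigned int level)
    {
        int ret = write_hw(level);

        if (ret)
            return ret;
        ctx->level = level;
        return 0;
    }

    static int fan_suspend(struct fan_ctx *ctx)
    {
        unsigned int saved = ctx->level;
        int ret = fan_set_level(ctx, 0);  /* stop the fan for suspend */

        if (ret)
            return ret;
        ctx->level = saved;  /* remember what resume should restore */
        return 0;
    }

    static int fan_resume(struct fan_ctx *ctx)
    {
        return fan_set_level(ctx, ctx->level);
    }

    int main(void)
    {
        struct fan_ctx ctx = { .level = 0 };

        fan_set_level(&ctx, 2);
        fan_suspend(&ctx);
        fan_resume(&ctx);  /* writes level 2 to the hardware again */
        return 0;
    }
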
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index e64db5f80d90..4ffa2e2c0145 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -220,7 +220,7 @@ static int calibrate_8916(struct tsens_priv *priv)
p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 3);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
@@ -355,7 +355,7 @@ static int calibrate_8974(struct tsens_priv *priv)
p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB:
case ONE_PT_CALIB2:
base1 = bkp[0] & BASE1_MASK;
@@ -390,7 +390,7 @@ static int calibrate_8974(struct tsens_priv *priv)
p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB:
case ONE_PT_CALIB2:
base1 = calib[0] & BASE1_MASK;
@@ -420,7 +420,7 @@ static int calibrate_8974(struct tsens_priv *priv)
p2[i] <<= 2;
p2[i] |= BIT_APPEND;
}
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
for (i = 0; i < priv->num_sensors; i++) {
p1[i] += base1;
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index b682a4df0081..3c19a3800c6d 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -202,7 +202,7 @@ static int calibrate_v1(struct tsens_priv *priv)
p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 2);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
@@ -263,7 +263,7 @@ static int calibrate_8976(struct tsens_priv *priv)
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 2);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK;
p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT;
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index f77ceae5c7d7..394a23ce6ca4 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -453,7 +453,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
"RX: checksum mismatch, dropping packet\n");
goto rx;
}
- /* Fall through */
+ fallthrough;
case TB_CFG_PKG_ICM_EVENT:
if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
goto rx;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 712395f518b8..3845db569e4c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2092,7 +2092,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (tb_route(sw))
return 0;
- /* fallthrough */
+ fallthrough;
case 3:
ret = tb_switch_set_uuid(sw);
if (ret)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 2aae2c76d880..1a7e849840b2 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -315,7 +315,7 @@ static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
switch (rate) {
default:
WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
- /* Fallthrough */
+ fallthrough;
case 1620:
val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
break;
@@ -355,7 +355,7 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
default:
WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
lanes);
- /* Fallthrough */
+ fallthrough;
case 1:
val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
break;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 2a0e51a20e34..92c9a476defc 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -492,7 +492,7 @@ static void xencons_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 21e76a2ec182..a8e19b4833bf 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -243,7 +243,7 @@ done:
/* Fall back to a 3 byte encoding */
word.bytes = 3;
word.word &= 0x00ffffff;
- /* Fall through */
+ fallthrough;
case 3:
/* 3 byte encoding */
word.word |= 0x82000000;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0a29a94ec438..35cf12147e39 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1584,7 +1584,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
gsm_process_modem(tty, dlci, modem, clen);
tty_kref_put(tty);
}
- /* Fall through */
+ fallthrough;
case 1: /* Line state will go via DLCI 0 controls only */
default:
tty_insert_flip_string(port, data, len);
@@ -1986,7 +1986,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
gsm->address = 0;
gsm->state = GSM_ADDRESS;
gsm->fcs = INIT_FCS;
- /* Fall through */
+ fallthrough;
case GSM_ADDRESS: /* Address continuation */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index b09eac4b6d64..8e975cb29833 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -602,7 +602,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
case TCOFLUSH:
flush_tx_queue(tty);
}
- /* fall through - to default */
+ fallthrough; /* to default */
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index f75696f0ee2d..934dd2fb2ec8 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -605,7 +605,6 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c)
}
break;
case R3964_WAIT_FOR_RX_REPEAT:
- /* FALLTHROUGH */
case R3964_IDLE:
if (c == STX) {
/* Prevent rx_queue from overflow: */
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index db88dee3a399..f8e99995eee9 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -39,7 +39,7 @@ static void serial8250_em_serial_out(struct uart_port *p, int offset, int value)
break;
case UART_IER: /* IER @ 0x04 */
value &= 0x0f; /* only 4 valid bits - not Xscale */
- /* fall-through */
+ fallthrough;
case UART_DLL_EM: /* DLL @ 0x24 (+9) */
case UART_DLM_EM: /* DLM @ 0x28 (+9) */
writel(value, p->membase + (offset << 2));
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 04b9af7ed941..2d0e7c7e408d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -744,6 +744,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_fastcom35x_2 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_4 = {
+ .num_ports = 4,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_8 = {
+ .num_ports = 8,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
@@ -811,9 +829,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x),
EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358),
EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358),
- EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_fastcom35x_2),
+ EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_fastcom35x_4),
+ EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_fastcom35x_8),
EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2),
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index d1d253c4b518..31c9e83ea3cb 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -255,7 +255,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
0);
- /* fall through */
+ fallthrough;
case CHIP_ID_F81865:
sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE,
F81866_IRQ_SHARE);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 1a74d511b02a..3eb2d485eaeb 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -631,7 +631,7 @@ pci_timedia_setup(struct serial_private *priv,
break;
case 3:
offset = board->uart_offset;
- /* FALLTHROUGH */
+ fallthrough;
case 4: /* BAR 2 */
case 5: /* BAR 3 */
case 6: /* BAR 4 */
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 09475695effd..c71d647eb87a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1872,7 +1872,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
switch (iir & 0x3f) {
case UART_IIR_RX_TIMEOUT:
serial8250_rx_dma_flush(up);
- /* fall-through */
+ fallthrough;
case UART_IIR_RLSI:
return true;
}
@@ -2275,6 +2275,10 @@ int serial8250_do_startup(struct uart_port *port)
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
+
+ if (port->irqflags & IRQF_SHARED)
+ disable_irq_nosync(port->irq);
+
/*
* Test for UARTs that do not reassert THRE when the
* transmitter is idle and the interrupt has already
@@ -2284,8 +2288,6 @@ int serial8250_do_startup(struct uart_port *port)
* allow register changes to become visible.
*/
spin_lock_irqsave(&port->lock, flags);
- if (up->port.irqflags & IRQF_SHARED)
- disable_irq_nosync(port->irq);
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
@@ -2297,9 +2299,10 @@ int serial8250_do_startup(struct uart_port *port)
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
- spin_unlock_irqrestore(&port->lock, flags);
/*
* If the interrupt is not reasserted, or we otherwise
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index e0b73a5402db..a2978abab0db 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -75,7 +75,7 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
break;
case UART_LCR:
valshift = 8;
- /* fall through */
+ fallthrough;
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
@@ -101,7 +101,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
case UART_SCR:
/* No SCR for this hardware. Use CHAR as a scratch register */
valshift = 8;
- /* fall through */
+ fallthrough;
case UART_FCR:
offset = UNIPHIER_UART_CHAR_FCR;
break;
@@ -109,7 +109,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
valshift = 8;
/* Divisor latch access bit does not exist. */
value &= ~UART_LCR_DLAB;
- /* fall through */
+ fallthrough;
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 8a0352eb337c..9409be982aa6 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -517,6 +517,7 @@ config SERIAL_IMX_CONSOLE
config SERIAL_IMX_EARLYCON
bool "Earlycon on IMX serial port"
+ depends on ARCH_MXC || COMPILE_TEST
depends on OF
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index d056ee6cca33..caf167f0c10a 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_SERIAL_ZS) += zs.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
obj-$(CONFIG_SERIAL_IMX) += imx.o
+obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o
obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
obj-$(CONFIG_SERIAL_ICOM) += icom.o
obj-$(CONFIG_SERIAL_MESON) += meson_uart.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c010f639298d..67498594d7d7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2241,9 +2241,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_disable(uap->clk);
}
-static void __init
-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
- int *parity, int *bits)
+static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
{
if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
unsigned int lcr_h, ibrd, fbrd;
@@ -2276,7 +2275,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
}
}
-static int __init pl011_console_setup(struct console *co, char *options)
+static int pl011_console_setup(struct console *co, char *options)
{
struct uart_amba_port *uap;
int baud = 38400;
@@ -2344,8 +2343,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
*
* Returns 0 if console matches; otherwise non-zero to use default matching
*/
-static int __init pl011_console_match(struct console *co, char *name, int idx,
- char *options)
+static int pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
{
unsigned char iotype;
resource_size_t addr;
@@ -2615,7 +2614,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
static int pl011_register_port(struct uart_amba_port *uap)
{
- int ret;
+ int ret, i;
/* Ensure interrupts from this UART are masked and cleared */
pl011_write(0, uap, REG_IMSC);
@@ -2626,6 +2625,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
if (ret < 0) {
dev_err(uap->port.dev,
"Failed to register AMBA-PL011 driver\n");
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
return ret;
}
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index e43471b33710..bb5fc8bdd57a 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1845,7 +1845,7 @@ static void atmel_get_ip_name(struct uart_port *port)
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
case 0x814: /* sama5d2 */
- /* fall through */
+ fallthrough;
case 0x701: /* sama5d4 */
atmel_port->fidi_min = 3;
atmel_port->fidi_max = 65535;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 8573fc9cb0cd..76b94d0ff586 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -587,7 +587,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
transmit_chars(up, lsr);
break;
case UART_IIR_RX_TIMEOUT:
- /* FALLTHROUGH */
case UART_IIR_RDI:
serial_omap_rdi(up, lsr);
break;
@@ -598,7 +597,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
/* simply try again */
break;
case UART_IIR_XOFF:
- /* FALLTHROUGH */
default:
break;
}
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3aa29d201f54..184b458820a3 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -361,11 +361,16 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
return NO_POLL_CHAR;
if (word_cnt == 1 && (status & RX_LAST))
+ /*
+ * NOTE: If RX_LAST_BYTE_VALID is 0 it needs to be
+ * treated as if it was BYTES_PER_FIFO_WORD.
+ */
private_data->poll_cached_bytes_cnt =
(status & RX_LAST_BYTE_VALID_MSK) >>
RX_LAST_BYTE_VALID_SHFT;
- else
- private_data->poll_cached_bytes_cnt = 4;
+
+ if (private_data->poll_cached_bytes_cnt == 0)
+ private_data->poll_cached_bytes_cnt = BYTES_PER_FIFO_WORD;
private_data->poll_cached_bytes =
readl(uport->membase + SE_GENI_RX_FIFOn);
@@ -1098,7 +1103,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
}
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
-static int __init qcom_geni_console_setup(struct console *co, char *options)
+static int qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index b5ef86ae2746..85366e059258 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -259,7 +259,7 @@ static void rda_uart_set_termios(struct uart_port *port,
case CS5:
case CS6:
dev_warn(port->dev, "bit size not supported, using 7 bits\n");
- /* Fall through */
+ fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
break;
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 8ed3482d2e1e..8ae3e03fbd8c 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -1905,9 +1905,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->tx_irq = ret + 1;
}
- ret = platform_get_irq(platdev, 1);
- if (ret > 0)
- ourport->tx_irq = ret;
+ if (!s3c24xx_serial_has_interrupt_mask(port)) {
+ ret = platform_get_irq(platdev, 1);
+ if (ret > 0)
+ ourport->tx_irq = ret;
+ }
/*
* DMA is currently supported only on DT platforms, if DMA properties
* are specified.
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index b87914ae6da8..bd13014a1c53 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -876,7 +876,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
tegra_uart_write(tup, ier, UART_IER);
break;
}
- /* Fall through */
+ fallthrough;
case 2: /* Receive */
if (!tup->use_rx_pio) {
is_rx_start = tup->rx_in_progress;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3403dd790517..f797c971cd82 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2101,7 +2101,7 @@ uart_set_options(struct uart_port *port, struct console *co,
switch (parity) {
case 'o': case 'O':
termios.c_cflag |= PARODD;
- /*fall through*/
+ fallthrough;
case 'e': case 'E':
termios.c_cflag |= PARENB;
break;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 143300a80090..ba503dd04ce2 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -970,7 +970,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
return ret;
if (stm32port->info->cfg.has_wakeup) {
- stm32port->wakeirq = platform_get_irq(pdev, 1);
+ stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
return stm32port->wakeirq ? : -ENODEV;
}
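
The stm32-usart hunk switches the wakeup interrupt lookup to platform_get_irq_optional(), which, unlike platform_get_irq(), does not log an error when the interrupt is simply not described; the driver then treats -ENXIO as "no wake IRQ", maps a zero return to -ENODEV, and propagates any other failure (including -EPROBE_DEFER). Roughly, with a made-up my_port structure standing in for the driver's port state:

    /* Sketch only; my_port and the surrounding probe code are illustrative. */
    struct my_port {
        int wakeirq;
    };

    static int my_port_get_wakeirq(struct platform_device *pdev,
                                   struct my_port *port)
    {
        int irq = platform_get_irq_optional(pdev, 1);

        if (irq == -ENXIO) {
            port->wakeirq = 0;  /* not wired up: optional, so not an error */
            return 0;
        }
        if (irq <= 0)
            return irq ? irq : -ENODEV;  /* defer or a real failure */

        port->wakeirq = irq;
        return 0;
    }
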
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 8ce9a7a256e5..319e5ceb6130 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -514,7 +514,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
switch (ret) {
case 2:
sunsu_change_mouse_baud(up);
- /* fallthru */
+ fallthrough;
case 1:
break;
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 7ea06bbc6197..001e19d7c17d 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -306,7 +306,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
switch (ret) {
case 2:
sunzilog_change_mouse_baud(up);
- /* fallthru */
+ fallthrough;
case 1:
break;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 2833f1418d6d..a9b1ee27183a 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -544,7 +544,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port,
cdns_uart->baud);
- /* fall through */
+ fallthrough;
case ABORT_RATE_CHANGE:
if (!locked)
spin_lock_irqsave(&cdns_uart->port->lock, flags);
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9245fffdbceb..e18f318586ab 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -866,7 +866,7 @@ static int __tty_perform_flush(struct tty_struct *tty, unsigned long arg)
ld->ops->flush_buffer(tty);
tty_unthrottle(tty);
}
- /* fall through */
+ fallthrough;
case TCOFLUSH:
tty_driver_flush_buffer(tty);
break;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ccb533fd00a2..19cd4a4b1939 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1201,7 +1201,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
- unsigned short *newscreen;
+ unsigned short *oldscreen, *newscreen;
struct uni_screen *new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
@@ -1299,10 +1299,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_scr_end > new_origin)
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
new_scr_end - new_origin);
- kfree(vc->vc_screenbuf);
+ oldscreen = vc->vc_screenbuf;
vc->vc_screenbuf = newscreen;
vc->vc_screenbuf_size = new_screen_size;
set_origin(vc);
+ kfree(oldscreen);
/* do part of a reset_terminal() */
vc->vc_top = 0;
@@ -1553,7 +1554,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* include scrollback */
flush_scrollback(vc);
- /* fallthrough */
+ fallthrough;
case 2: /* erase whole display */
vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
count = vc->vc_cols * vc->vc_rows;
@@ -2167,7 +2168,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
lf(vc);
if (!is_kbd(vc, lnm))
return;
- /* fall through */
+ fallthrough;
case 13:
cr(vc);
return;
@@ -2306,7 +2307,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
}
vc->vc_priv = EPecma;
- /* fall through */
+ fallthrough;
case ESgetpars:
if (c == ';' && vc->vc_npar < NPAR - 1) {
vc->vc_npar++;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 91c301775047..a4e520bdd521 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -806,12 +806,22 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
console_lock();
vcp = vc_cons[i].d;
if (vcp) {
+ int ret;
+ int save_scan_lines = vcp->vc_scan_lines;
+ int save_font_height = vcp->vc_font.height;
+
if (v.v_vlin)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_font.height = v.v_clin;
vcp->vc_resize_user = 1;
- vc_resize(vcp, v.v_cols, v.v_rows);
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ if (ret) {
+ vcp->vc_scan_lines = save_scan_lines;
+ vcp->vc_font.height = save_font_height;
+ console_unlock();
+ return ret;
+ }
}
console_unlock();
}
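
The vt_ioctl change makes vt_resizex() honour a failure from vc_resize(): vc_scan_lines and vc_font.height are snapshotted before being overwritten and are rolled back if the resize fails, instead of leaving the console with half-applied geometry. The same save/try/restore shape reduced to plain C (hypothetical names, toy resize stub):

    #include <errno.h>

    struct console_state {
        int scan_lines;
        int font_height;
    };

    /* Toy stand-in for vc_resize(); may fail. */
    int try_resize(struct console_state *c, int cols, int rows)
    {
        (void)c;
        return (cols > 0 && rows > 0) ? 0 : -EINVAL;
    }

    int apply_mode(struct console_state *c, int cols, int rows,
                   int new_scan_lines, int new_font_height)
    {
        int save_scan_lines = c->scan_lines;
        int save_font_height = c->font_height;
        int ret;

        if (new_scan_lines)
            c->scan_lines = new_scan_lines;
        if (new_font_height)
            c->font_height = new_font_height;

        ret = try_resize(c, cols, rows);
        if (ret) {
            /* Undo the speculative updates so state stays consistent. */
            c->scan_lines = save_scan_lines;
            c->font_height = save_font_height;
        }
        return ret;
    }
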
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index f7f6229082ca..60f4711717d2 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -710,7 +710,8 @@ static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
if (ret)
return ret;
break;
- } /* else fallthrough */
+ }
+ fallthrough;
case STATUS_STAGE:
pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 991786876dbb..7f6f3ab5b8a6 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb)
if (current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
- if (acm->nb_size) {
- kfree(acm->notification_buffer);
- acm->nb_size = 0;
- }
+ u8 *new_buffer;
alloc_size = roundup_pow_of_two(expected_size);
- /*
- * kmalloc ensures a valid notification_buffer after a
- * use of kfree in case the previous allocation was too
- * small. Final freeing is done on disconnect.
- */
- acm->notification_buffer =
- kmalloc(alloc_size, GFP_ATOMIC);
- if (!acm->notification_buffer)
+ /* Final freeing is done on disconnect. */
+ new_buffer = krealloc(acm->notification_buffer,
+ alloc_size, GFP_ATOMIC);
+ if (!new_buffer) {
+ acm->nb_index = 0;
goto exit;
+ }
+
+ acm->notification_buffer = new_buffer;
acm->nb_size = alloc_size;
+ dr = (struct usb_cdc_notification *)acm->notification_buffer;
}
copy_size = min(current_size,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f81606c6a35b..7e73e989645b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -905,6 +905,35 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static bool is_dev_usb_generic_driver(struct device *dev)
+{
+ struct usb_device_driver *udd = dev->driver ?
+ to_usb_device_driver(dev->driver) : NULL;
+
+ return udd == &usb_generic_driver;
+}
+
+static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
+{
+ struct usb_device_driver *new_udriver = data;
+ struct usb_device *udev;
+ int ret;
+
+ if (!is_dev_usb_generic_driver(dev))
+ return 0;
+
+ udev = to_usb_device(dev);
+ if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
+ (!new_udriver->match || new_udriver->match(udev) != 0))
+ return 0;
+
+ ret = device_reprobe(dev);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to reprobe device (error %d)\n", ret);
+
+ return 0;
+}
+
/**
* usb_register_device_driver - register a USB device (not interface) driver
* @new_udriver: USB operations for the device driver
@@ -934,13 +963,20 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
retval = driver_register(&new_udriver->drvwrap.driver);
- if (!retval)
+ if (!retval) {
pr_info("%s: registered new device driver %s\n",
usbcore_name, new_udriver->name);
- else
+ /*
+ * Check whether any device could be better served with
+ * this new driver
+ */
+ bus_for_each_dev(&usb_bus_type, NULL, new_udriver,
+ __usb_bus_reprobe_drivers);
+ } else {
printk(KERN_ERR "%s: error %d registering device "
" driver %s\n",
usbcore_name, retval, new_udriver->name);
+ }
return retval;
}
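
The reprobe walk added above leans on the bus_for_each_dev() contract: the callback runs once for every device on the bus, and iteration stops early only when it returns non-zero, which is why __usb_bus_reprobe_drivers() returns 0 even when device_reprobe() fails. A minimal sketch of that contract with a hypothetical counting callback (not part of the patch):

    /* Hypothetical helper: count devices on a bus that are bound to a driver. */
    static int count_bound(struct device *dev, void *data)
    {
        unsigned int *n = data;

        if (dev->driver)
            (*n)++;

        return 0;  /* 0 keeps the walk going; non-zero would stop it */
    }

    static unsigned int bound_devices_on(struct bus_type *bus)
    {
        unsigned int n = 0;

        bus_for_each_dev(bus, NULL, &n, count_bound);
        return n;
    }
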
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index b6f2d4b44754..2b2f1ab6e36a 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -205,8 +205,9 @@ static int __check_usb_generic(struct device_driver *drv, void *data)
udrv = to_usb_device_driver(drv);
if (udrv == &usb_generic_driver)
return 0;
-
- return usb_device_match_id(udev, udrv->id_table) != NULL;
+ if (usb_device_match_id(udev, udrv->id_table) != NULL)
+ return 1;
+ return (udrv->match && udrv->match(udev));
}
static bool usb_generic_driver_match(struct usb_device *udev)
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 4dc443aaef5c..ec0d6c50610c 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -315,11 +315,14 @@ EXPORT_SYMBOL_GPL(usb_hcd_pci_probe);
void usb_hcd_pci_remove(struct pci_dev *dev)
{
struct usb_hcd *hcd;
+ int hcd_driver_flags;
hcd = pci_get_drvdata(dev);
if (!hcd)
return;
+ hcd_driver_flags = hcd->driver->flags;
+
if (pci_dev_run_wake(dev))
pm_runtime_get_noresume(&dev->dev);
@@ -347,7 +350,7 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
up_read(&companions_rwsem);
}
usb_put_hcd(hcd);
- if ((hcd->driver->flags & HCD_MASK) < HCD_USB3)
+ if ((hcd_driver_flags & HCD_MASK) < HCD_USB3)
pci_free_irq_vectors(dev);
pci_disable_device(dev);
}
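
The hcd-pci change is a use-after-free fix in miniature: hcd->driver->flags was read after usb_put_hcd() may have dropped the final reference, so the needed flags are now copied into a local while the object is still guaranteed to be alive. The essence, stripped of the USB specifics (plain C, toy types):

    #include <stdlib.h>

    struct hc_driver_info { int flags; };
    struct host { struct hc_driver_info *driver; };

    /* Stand-in for usb_put_hcd(): may free h on the last reference. */
    void put_host(struct host *h)
    {
        free(h->driver);
        free(h);
    }

    int teardown(struct host *h)
    {
        int flags = h->driver->flags;  /* snapshot while h is still valid */

        put_host(h);  /* h must not be dereferenced after this point */

        return flags;  /* safe: uses the saved copy, not h */
    }
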
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 052d5accfe9b..5b768b80d1ee 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -727,7 +727,7 @@ static void hub_irq(struct urb *urb)
if ((++hub->nerrors < 10) || hub->error)
goto resubmit;
hub->error = status;
- /* FALL THROUGH */
+ fallthrough;
/* let hub_wq handle things */
case 0: /* we got data: port status changed */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 7c1198f80c23..f232914de5fd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
+ /* Sound Devices MixPre-D */
+ { USB_DEVICE(0x0926, 0x0208), .driver_info =
+ USB_QUIRK_ENDPOINT_IGNORE },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -465,6 +469,8 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
@@ -509,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
*/
static const struct usb_device_id usb_endpoint_ignore[] = {
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
{ }
};
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 422aea24afcd..2eb34c8b4065 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -646,9 +646,8 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- /* FALLTHROUGH */
default:
break;
}
@@ -1411,7 +1410,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
default:
dev_err(dev, "invalid maximum_speed parameter %d\n",
dwc->maximum_speed);
- /* fall through */
+ fallthrough;
case USB_SPEED_UNKNOWN:
/* default to superspeed */
dwc->maximum_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e44bfc3b5096..c2a0f64f8d1e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1054,27 +1054,25 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
* dwc3_prepare_one_trb - setup one TRB from one request
* @dep: endpoint for which this request is prepared
* @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
* @chain: should this TRB be chained to the next?
* @node: only for isochronous endpoints. First TRB needs different type.
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned chain, unsigned node)
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
- unsigned int length;
dma_addr_t dma;
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
unsigned is_last = req->request.is_last;
- if (req->request.num_sgs > 0) {
- length = sg_dma_len(req->start_sg);
+ if (req->request.num_sgs > 0)
dma = sg_dma_address(req->start_sg);
- } else {
- length = req->request.length;
+ else
dma = req->request.dma;
- }
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1086,7 +1084,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+ __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
stream_id, short_not_ok, no_interrupt, is_last);
}
@@ -1096,16 +1094,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
struct scatterlist *sg = req->start_sg;
struct scatterlist *s;
int i;
-
+ unsigned int length = req->request.length;
unsigned int remaining = req->request.num_mapped_sgs
- req->num_queued_sgs;
+ /*
+ * If we resume preparing the request, then get the remaining length of
+ * the request and resume where we left off.
+ */
+ for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+ length -= sg_dma_len(s);
+
for_each_sg(sg, s, remaining, i) {
- unsigned int length = req->request.length;
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
+ unsigned int trb_length;
unsigned chain = true;
+ trb_length = min_t(unsigned int, length, sg_dma_len(s));
+
+ length -= trb_length;
+
/*
* IOMMU driver is coalescing the list of sgs which shares a
* page boundary into one and giving it to USB driver. With
@@ -1113,7 +1122,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
* sgs passed. So mark the chain bit to false if it isthe last
* mapped sg.
*/
- if (i == remaining - 1)
+ if ((i == remaining - 1) || !length)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
@@ -1123,7 +1132,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1134,8 +1143,39 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->request.short_not_ok,
req->request.no_interrupt,
req->request.is_last);
+ } else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ !rem && !chain) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->needs_extra_trb = true;
+
+ /* Prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+ /* Prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+ !req->direction, 1,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+
+ /* Prepare one more TRB to handle MPS alignment */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, chain, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
}
/*
@@ -1150,6 +1190,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->num_queued_sgs++;
+ /*
+ * The number of pending SG entries may not correspond to the
+ * number of mapped SG entries. If all the data are queued, then
+ * don't include unused SG entries.
+ */
+ if (length == 0) {
+ req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+ break;
+ }
+
if (!dwc3_calc_trbs_left(dep))
break;
}
@@ -1169,7 +1219,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1180,6 +1230,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->request.no_interrupt,
req->request.is_last);
} else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
(IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
@@ -1187,18 +1238,29 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
- /* Now prepare one extra TRB to handle ZLP */
+ /* Prepare one extra TRB to handle ZLP */
trb = &dep->trb_pool[dep->trb_enqueue];
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- false, 1, req->request.stream_id,
+ !req->direction, 1, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt,
req->request.is_last);
+
+ /* Prepare one more TRB to handle MPS alignment for OUT */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, false, 0);
+ dwc3_prepare_one_trb(dep, req, length, false, 0);
}
}
@@ -2671,8 +2733,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
status);
if (req->needs_extra_trb) {
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
+
+ /* Reclaim MPS padding TRB for ZLP */
+ if (!req->direction && req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (IS_ALIGNED(req->request.length, maxp)))
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
+
req->needs_extra_trb = false;
}
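
The scatter-gather rework above sizes each TRB from its SG entry rather than from the whole request: it first subtracts the lengths already queued on earlier passes to find how much of the request remains, programs each TRB with min(remaining, sg_dma_len()), and clears the chain bit as soon as the remaining length reaches zero, even if mapped SG entries are left over. The arithmetic on its own (plain C, toy SG list; numbers chosen only for illustration):

    #include <stdio.h>

    /* Toy model: lengths of the mapped SG entries for one request. */
    static const unsigned int sg_len[] = { 4096, 4096, 1024 };
    #define NUM_SGS (sizeof(sg_len) / sizeof(sg_len[0]))

    int main(void)
    {
        unsigned int request_len = 6000;  /* total transfer size */
        unsigned int queued = 1;          /* entries queued on a previous pass */
        unsigned int remaining = request_len;
        unsigned int i;

        /* Resume where we left off: drop what earlier passes already queued. */
        for (i = 0; i < queued; i++)
            remaining -= sg_len[i];

        for (i = queued; i < NUM_SGS && remaining; i++) {
            unsigned int trb_len = remaining < sg_len[i] ? remaining : sg_len[i];
            int chain = (i != NUM_SGS - 1) && (remaining > trb_len);

            remaining -= trb_len;
            printf("TRB %u: len=%u chain=%d\n", i, trb_len, chain);
        }
        return 0;
    }
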
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 331c951d72dc..950c9435beec 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2039,7 +2039,6 @@ static int do_scsi_command(struct fsg_common *common)
case RELEASE:
case RESERVE:
case SEND_DIAGNOSTIC:
- fallthrough;
default:
unknown_cmnd:
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 1d900081b1f0..b4206b0dede5 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port,
int ndp_index;
unsigned dg_len, dg_len2;
unsigned ndp_len;
+ unsigned block_len;
struct sk_buff *skb2;
int ret = -EINVAL;
- unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
+ bool ndp_after_header;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port,
}
tmp++; /* skip wSequence */
+ block_len = get_ncm(&tmp, opts->block_length);
/* (d)wBlockLength */
- if (get_ncm(&tmp, opts->block_length) > max_size) {
+ if (block_len > ntb_max) {
INFO(port->func.config->cdev, "OUT size exceeded\n");
goto err;
}
ndp_index = get_ncm(&tmp, opts->ndp_index);
+ ndp_after_header = false;
/* Run through all the NDP's in the NTB */
do {
- /* NCM 3.2 */
- if (((ndp_index % 4) != 0) &&
- (ndp_index < opts->nth_size)) {
+ /*
+ * NCM 3.2
+ * dwNdpIndex
+ */
+ if (((ndp_index % 4) != 0) ||
+ (ndp_index < opts->nth_size) ||
+ (ndp_index > (block_len -
+ opts->ndp_size))) {
INFO(port->func.config->cdev, "Bad index: %#X\n",
ndp_index);
goto err;
}
+ if (ndp_index == opts->nth_size)
+ ndp_after_header = true;
- /* walk through NDP */
+ /*
+ * walk through NDP
+ * dwSignature
+ */
tmp = (void *)(skb->data + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
@@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_len = get_unaligned_le16(tmp++);
/*
* NCM 3.3.1
+ * wLength
* entry is 2 items
* item size is 16/32 bits, opts->dgram_item_len * 2 bytes
* minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
* Each entry is a dgram index and a dgram length.
*/
if ((ndp_len < opts->ndp_size
- + 2 * 2 * (opts->dgram_item_len * 2))
- || (ndp_len % opts->ndplen_align != 0)) {
+ + 2 * 2 * (opts->dgram_item_len * 2)) ||
+ (ndp_len % opts->ndplen_align != 0)) {
INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
ndp_len);
goto err;
@@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port,
do {
index = index2;
+ /* wDatagramIndex[0] */
+ if ((index < opts->nth_size) ||
+ (index > block_len - opts->dpe_size)) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index);
+ goto err;
+ }
+
dg_len = dg_len2;
- if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
+ /*
+ * wDatagramLength[0]
+ * ethernet hdr + crc or larger than max frame size
+ */
+ if ((dg_len < 14 + crc_len) ||
+ (dg_len > frame_max)) {
INFO(port->func.config->cdev,
"Bad dgram length: %#X\n", dg_len);
goto err;
@@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port,
index2 = get_ncm(&tmp, opts->dgram_item_len);
dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+ if (index2 == 0 || dg_len2 == 0)
+ break;
+
+ /* wDatagramIndex[1] */
+ if (ndp_after_header) {
+ if (index2 < opts->nth_size + opts->ndp_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+ } else {
+ if (index2 < opts->nth_size + opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+ }
+ if (index2 > block_len - opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+
+ /* wDatagramLength[1] */
+ if ((dg_len2 < 14 + crc_len) ||
+ (dg_len2 > frame_max)) {
+ INFO(port->func.config->cdev,
+ "Bad dgram length: %#X\n", dg_len);
+ goto err;
+ }
+
/*
* Copy the data into a new skb.
* This ensures the truesize is correct
@@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_len -= 2 * (opts->dgram_item_len * 2);
dgram_counter++;
-
- if (index2 == 0 || dg_len2 == 0)
- break;
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
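
The f_ncm hunks harden ncm_unwrap_ntb() against a hostile or corrupted NTB by range-checking every offset and length before it is used to index into the skb: the NDP index must be aligned and lie inside the received block, datagram indices must not point into the header or past the block, and datagram lengths are bounded below by the Ethernet header (plus CRC) and above by wMaxSegmentSize. The flavour of those checks, separated from the USB plumbing (plain C; the sizes are hypothetical and block_len is assumed to have been validated against the NTB maximum already):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sizes standing in for the parsed NTB parameters. */
    #define NTH_SIZE    16u  /* transfer header the indices must not point into */
    #define DPE_SIZE     4u  /* one datagram pointer entry */
    #define ETH_HDR_MIN 14u

    bool index_ok(uint32_t index, uint32_t block_len)
    {
        if (block_len < NTH_SIZE + DPE_SIZE)
            return false;  /* block too small to hold any entry at all */

        /* 4-byte aligned, past the header, and inside the block. */
        return (index % 4) == 0 &&
               index >= NTH_SIZE &&
               index <= block_len - DPE_SIZE;
    }

    bool dgram_len_ok(uint32_t len, uint32_t crc_len, uint32_t frame_max)
    {
        return len >= ETH_HDR_MIN + crc_len && len <= frame_max;
    }
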
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d94b814328c8..184165e27908 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -753,12 +753,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
goto err_sts;
return 0;
+
err_sts:
- usb_ep_free_request(fu->ep_status, stream->req_status);
- stream->req_status = NULL;
-err_out:
usb_ep_free_request(fu->ep_out, stream->req_out);
stream->req_out = NULL;
+err_out:
+ usb_ep_free_request(fu->ep_in, stream->req_in);
+ stream->req_in = NULL;
out:
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index eaa13fd3dc7f..e313c3b8dcb1 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -14,6 +14,7 @@
#define __U_F_H__
#include <linux/usb/gadget.h>
+#include <linux/overflow.h>
/* Variable Length Array Macros **********************************************/
#define vla_group(groupname) size_t groupname##__next = 0
@@ -21,21 +22,36 @@
#define vla_item(groupname, type, name, n) \
size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = (n) * sizeof(type); \
- groupname##__next = offset + size; \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t size = array_size(n, sizeof(type)); \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, size, \
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
offset; \
})
#define vla_item_with_sz(groupname, type, name, n) \
- size_t groupname##_##name##__sz = (n) * sizeof(type); \
- size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = groupname##_##name##__sz; \
- groupname##__next = offset + size; \
- offset; \
+ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, groupname##_##name##__sz,\
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
})
#define vla_ptr(ptr, groupname, name) \
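
With the rework above, an overflow in array_size() or in the offset addition saturates the running group size to SIZE_MAX, so a caller can detect the failure with a single comparison before allocating. A minimal usage sketch, assuming a hypothetical "demo" group and using the vla_group_size()/vla_ptr() companions from this same header:

#include <linux/slab.h>
#include <linux/types.h>
#include "u_f.h"

struct demo_hdr { u32 magic; };

/* Illustrative only: allocate a header plus n bytes of payload in one block. */
static void *demo_alloc(size_t n, u8 **payload)
{
	vla_group(demo);
	vla_item(demo, struct demo_hdr, hdr, 1);
	vla_item(demo, u8, payload, n);
	void *buf;

	/* Any overflow above collapsed the group size to SIZE_MAX. */
	if (vla_group_size(demo) == SIZE_MAX)
		return NULL;

	buf = kzalloc(vla_group_size(demo), GFP_KERNEL);
	if (buf)
		*payload = vla_ptr(buf, demo, payload);
	return buf;
}
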
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index fa6793065c7c..a6426dd1cfef 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -328,7 +328,7 @@ static int usba_config_fifo_table(struct usba_udc *udc)
switch (fifo_mode) {
default:
fifo_mode = 0;
- /* fall through */
+ fallthrough;
case 0:
udc->fifo_cfg = NULL;
n = 0;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index b2638e83bb49..a6f7b2594c09 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -250,7 +250,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
break;
case FSL_USB2_PHY_UTMI_WIDE:
portctrl |= PORTSCX_PTW_16BIT;
- /* fall through */
+ fallthrough;
case FSL_USB2_PHY_UTMI:
case FSL_USB2_PHY_UTMI_DUAL:
if (udc->pdata->have_sysif_regs) {
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index cfafdd92c2a8..10324a7334fe 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2340,12 +2340,12 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
case PXA250_A0:
case PXA250_A1:
/* A0/A1 "not released"; ep 13, 15 unusable */
- /* fall through */
+ fallthrough;
case PXA250_B2: case PXA210_B2:
case PXA250_B1: case PXA210_B1:
case PXA250_B0: case PXA210_B0:
/* OUT-DMA is broken ... */
- /* fall through */
+ fallthrough;
case PXA250_C0: case PXA210_C0:
break;
#elif defined(CONFIG_ARCH_IXP4XX)
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index a87c0b26279e..3055d9abfec3 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1019,7 +1019,7 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp116x->lock, flags);
- /* fall through */
+ fallthrough;
case C_HUB_LOCAL_POWER:
DBG("C_HUB_LOCAL_POWER\n");
break;
@@ -1421,10 +1421,10 @@ static int isp116x_bus_suspend(struct usb_hcd *hcd)
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) |
HCCONTROL_USB_RESET);
- /* fall through */
+ fallthrough;
case HCCONTROL_USB_RESET:
ret = -EBUSY;
- /* fall through */
+ fallthrough;
default: /* HCCONTROL_USB_SUSPEND */
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index bd40e597f256..5f5e8a64c8e2 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index b8961c0381cf..8c1bbac6d136 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -957,7 +957,8 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
case 0: /* Illegal reserved cap, set cap=0 so we exit */
- cap = 0; /* fall through */
+ cap = 0;
+ fallthrough;
default:
dev_warn(&pdev->dev,
"EHCI: unrecognized capability %02x\n",
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index fcc5ac5ce8b1..ccb0156fcebe 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -699,7 +699,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
- /* FALLTHROUGH */
+ fallthrough;
case COMP_SHORT_PACKET:
status = 0;
break;
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 92e25a62fdb5..c88bffd68742 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -274,7 +274,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
{
- int dci;
+ int ep_index;
dma_addr_t dma;
struct xhci_hcd *xhci;
struct xhci_ep_ctx *ep_ctx;
@@ -283,9 +283,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
- for (dci = 1; dci < 32; dci++) {
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
- dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+ for (ep_index = 0; ep_index < 31; ep_index++) {
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
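
The loop above now iterates endpoint indices (0..30) instead of device context indices (1..31); the two are related by DCI = ep_index + 1 because DCI 0 is the slot context. A one-line sketch of the offset computation used above, with an illustrative helper name:

#include <linux/types.h>

/* Illustrative only: DMA address of an endpoint context, DCI = ep_index + 1. */
static inline dma_addr_t ep_ctx_dma(dma_addr_t out_ctx_dma,
				    unsigned int ep_index, unsigned int ctx_size)
{
	return out_ctx_dma + (ep_index + 1) * ctx_size;
}
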
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index c3554e37e09f..c799ca5361d4 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -740,15 +740,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
{
u32 pls = status_reg & PORT_PLS_MASK;
- /* resume state is a xHCI internal state.
- * Do not report it to usb core, instead, pretend to be U3,
- * thus usb core knows it's not ready for transfer
- */
- if (pls == XDEV_RESUME) {
- *status |= USB_SS_PORT_LS_U3;
- return;
- }
-
/* When the CAS bit is set then warm reset
* should be performed on port
*/
@@ -771,6 +762,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
pls |= USB_PORT_STAT_CONNECTION;
} else {
/*
+ * Resume state is an xHCI internal state. Do not report it to
+ * usb core, instead, pretend to be U3, thus usb core knows
+ * it's not ready for transfer.
+ */
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
+ return;
+ }
+
+ /*
* If CAS bit isn't set but the Port is already at
* Compliance Mode, fake a connection so the USB core
* notices the Compliance state and resets the port.
@@ -1483,7 +1484,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
- /* fall through */
+ fallthrough;
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 696fad50b478..fe405cd38dbc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1311,7 +1311,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
- /* Fall through - SS and HS isoc/int have same decoding */
+ fallthrough; /* SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
@@ -1331,7 +1331,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* since it uses the same rules as low speed interrupt
* endpoints.
*/
- /* fall through */
+ fallthrough;
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 59b1965ad0a3..f97ac9f52bf4 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -50,20 +50,6 @@
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
-#define ROM_VALID_01 0x2013
-#define ROM_VALID_02 0x2026
-
-static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version)
-{
- switch (version) {
- case ROM_VALID_01:
- case ROM_VALID_02:
- return 0;
- }
- dev_err(&pdev->dev, "FW has invalid version :%d\n", version);
- return -EINVAL;
-}
-
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
@@ -202,10 +188,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
version &= RENESAS_FW_VERSION_FIELD;
version = version >> RENESAS_FW_VERSION_OFFSET;
-
- err = renesas_verify_fw_version(pdev, version);
- if (err)
- return err;
+ dev_dbg(&pdev->dev, "Found ROM version: %x\n", version);
/*
* Test if ROM is present and loaded, if so we can skip everything
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2c255d0620b0..a741a38a4c69 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2103,7 +2103,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
trb_comp_code, ep_index);
- /* else fall through */
+ fallthrough;
case COMP_STALL_ERROR:
/* Did we transfer part of the data (middle) phase? */
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 014d79334f50..190923d8b246 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1136,7 +1136,7 @@ static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name,
unsigned int i, phy_count = 0;
for (i = 0; i < tegra->soc->num_types; i++) {
- if (!strncmp(tegra->soc->phy_types[i].name, "usb2",
+ if (!strncmp(tegra->soc->phy_types[i].name, name,
strlen(name)))
return tegra->phys[phy_count+port];
@@ -1258,6 +1258,8 @@ static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra)
INIT_WORK(&tegra->id_work, tegra_xhci_id_work);
tegra->id_nb.notifier_call = tegra_xhci_id_notify;
+ tegra->otg_usb2_port = -EINVAL;
+ tegra->otg_usb3_port = -EINVAL;
for (i = 0; i < tegra->num_usb_phys; i++) {
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3c41b14ecce7..f4cedcaee14b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3236,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(cfg_cmd->completion);
- ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
+ if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
+ ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
@@ -4618,7 +4619,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout(
break;
}
/* Otherwise the calculation is the same as isoc eps */
- /* fall through */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 407fe7570f3b..f8686139d6f3 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
- return ret;
+ return ret < 0 ? ret : -EINVAL;
}
/* submit urb to poll interrupt endpoint */
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6e7d34e7fec4..b2e09883c7e2 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
dev->cntl_buffer[0]);
- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
if (retval >= 0)
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index c545b27ea568..edb5b63d7063 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -975,7 +975,7 @@ static int cppi_channel_program(struct dma_channel *ch,
musb_dbg(musb, "%cX DMA%d not allocated!",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_DMA_STATUS_FREE:
break;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5a56a03996b1..849e0b770130 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -852,7 +852,7 @@ static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
- /* fall through */
+ fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
@@ -972,9 +972,8 @@ static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
case OTG_STATE_A_PERIPHERAL:
musb_hnp_stop(musb);
musb_root_disconnect(musb);
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_B_WAIT_ACON:
- /* FALLTHROUGH */
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_g_disconnect(musb);
@@ -1009,7 +1008,7 @@ static void musb_handle_intr_reset(struct musb *musb)
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
musb_g_reset(musb);
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
musb_dbg(musb, "HNP: in %s, %d msec timeout",
@@ -1030,7 +1029,7 @@ static void musb_handle_intr_reset(struct musb *musb)
break;
case OTG_STATE_B_IDLE:
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
break;
@@ -1471,7 +1470,7 @@ static int ep_config_from_table(struct musb *musb)
switch (fifo_mode) {
default:
fifo_mode = 0;
- /* FALLTHROUGH */
+ fallthrough;
case 0:
cfg = mode_0_cfg;
n = ARRAY_SIZE(mode_0_cfg);
@@ -2018,7 +2017,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
musb->quirk_retries--;
return;
}
- /* fall through */
+ fallthrough;
case MUSB_QUIRK_A_DISCONNECT_19:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 19556c1a8ae8..30085b2be7b9 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -232,7 +232,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
dsps_mod_timer_optional(glue);
break;
}
- /* fall through */
+ fallthrough;
case OTG_STATE_A_WAIT_BCON:
/* keep VBUS on for host-only mode */
@@ -242,7 +242,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
}
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
- /* fall through */
+ fallthrough;
case OTG_STATE_A_IDLE:
case OTG_STATE_B_IDLE:
@@ -793,7 +793,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
case USB_SPEED_SUPER:
dev_warn(dev, "ignore incorrect maximum_speed "
"(super-speed) setting in dts");
- /* fall through */
+ fallthrough;
default:
config->maximum_speed = USB_SPEED_HIGH;
}
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 0ae3e0be043e..44d3cb02fa76 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -735,7 +735,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
musb_writeb(mbase, MUSB_TESTMODE,
musb->test_mode_nr);
}
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_STAGE_STATUSOUT:
/* end of sequence #1: write to host (TX state) */
@@ -767,7 +767,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
*/
retval = IRQ_HANDLED;
musb->ep0_state = MUSB_EP0_STAGE_SETUP;
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_STAGE_SETUP:
setup:
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8b7d22a0c0fb..30c5e7de0761 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -360,7 +360,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
qh = first_qh(head);
break;
}
- /* fall through */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
@@ -1019,7 +1019,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_OUT:
fifo_count = min_t(size_t, qh->maxpacket,
urb->transfer_buffer_length -
@@ -2222,7 +2222,7 @@ static int musb_urb_enqueue(
interval = max_t(u8, epd->bInterval, 1);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
/* ISO always uses logarithmic encoding */
interval = min_t(u8, epd->bInterval, 16);
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index cb7ae297a3af..cafc69536e1d 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -211,7 +211,7 @@ void musb_root_disconnect(struct musb *musb)
musb->g.is_a_peripheral = 1;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_HOST:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d62c78b97cad..4232f1ce3fbf 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -104,7 +104,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
if (error)
break;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- /* Fall through */
+ fallthrough;
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_HOST:
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 99890d1bbfcb..c26683a2702b 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -464,7 +464,7 @@ static void musb_do_idle(struct timer_list *t)
dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
usb_otg_state_string(musb->xceiv->otg->state));
}
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_IDLE:
tusb_musb_set_vbus(musb, 0);
default:
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index d4ee3cb721ea..f6d3731581eb 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -176,6 +176,7 @@ static int ingenic_usb_phy_init(struct usb_phy *phy)
/* Wait for PHY to reset */
usleep_range(30, 300);
+ reg = readl(priv->base + REG_USBPCR_OFFSET);
writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
usleep_range(300, 1000);
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index c8a988d2cfdd..15dc25801cdc 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -592,7 +592,7 @@ static unsigned long sddr55_get_capacity(struct us_data *us) {
case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
- /* fall through */
+ fallthrough;
case 0x5d: // 5d is a ROM card with pagesize 512.
return 0x00200000;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d592071119ba..08f9296431e9 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -688,7 +688,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
- /* fall through */
+ fallthrough;
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
case DMA_NONE:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 220ae2c356ee..5732e9691f08 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_BROKEN_FUA ),
+ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 162b09d69f62..711ab240058c 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -28,6 +28,13 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/* Reported-by: Till Dörges <doerges@pre-sense.de> */
+UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
+ "Sony",
+ "PSZ-HA*",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Julian Groß <julian.g@posteo.de> */
UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
"LaCie",
@@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f57d91fd0e09..bd80e03b2b6f 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -157,7 +157,7 @@ static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
case 0x3:
if (sink)
return TYPEC_CC_RP_3_0;
- /* fall through */
+ fallthrough;
case 0x0:
default:
return TYPEC_CC_OPEN;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 3ef37202ee37..a48e3f90d196 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3372,13 +3372,31 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
break;
case SRC_HARD_RESET_VBUS_OFF:
- tcpm_set_vconn(port, true);
+ /*
+ * 7.1.5 Response to Hard Resets
+ * Hard Reset Signaling indicates a communication failure has occurred and the
+ * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
+ * drive VBUS to vSafe0V as shown in Figure 7-9.
+ */
+ tcpm_set_vconn(port, false);
tcpm_set_vbus(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
tcpm_data_role_for_source(port));
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
+ /*
+ * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
+ * PD_T_SRC_RECOVER before turning vbus back on.
+ * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
+ * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
+ * tells the Device Policy Manager to instruct the power supply to perform a
+ * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
+ * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
+ * re-establish communication with the Sink and resume USB Default Operation.
+ * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
+ */
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
break;
case SRC_HARD_RESET_VBUS_ON:
+ tcpm_set_vconn(port, true);
tcpm_set_vbus(port, true);
port->tcpc->set_pd_rx(port->tcpc, true);
tcpm_set_attached_state(port, true);
@@ -3944,7 +3962,11 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
break;
case SRC_HARD_RESET_VBUS_OFF:
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
+ /*
+ * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
+ * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
+ */
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
break;
case HARD_RESET_SEND:
break;
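
The two hunks above follow the ordering quoted from the PD spec: on a source-initiated hard reset VCONN is dropped together with VBUS, and VBUS is re-applied only after tSrcRecover once vSafe0V is reached, or after tSafe0V + tSrcRecover when the TCPC never reports VBUS off. A sketch of that delay selection, assuming a hypothetical helper (PD_T_SAFE_0V and PD_T_SRC_RECOVER are the timer macros used above):

/* Illustrative only: delay in ms before entering SRC_HARD_RESET_VBUS_ON. */
static unsigned int src_hard_reset_recover_delay(bool vbus_off_reported)
{
	if (vbus_off_reported)			/* vSafe0V observed by the TCPC */
		return PD_T_SRC_RECOVER;
	return PD_T_SAFE_0V + PD_T_SRC_RECOVER;	/* budget for tSafe0V as well */
}
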
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 048381c058a5..261131c9e37c 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -288,8 +288,6 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
struct typec_altmode *alt;
struct ucsi_dp *dp;
- mutex_lock(&con->lock);
-
/* We can't rely on the firmware with the capabilities. */
desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
@@ -298,15 +296,12 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
desc->vdo |= all_assignments << 16;
alt = typec_port_register_altmode(con->port, desc);
- if (IS_ERR(alt)) {
- mutex_unlock(&con->lock);
+ if (IS_ERR(alt))
return alt;
- }
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp) {
typec_unregister_altmode(alt);
- mutex_unlock(&con->lock);
return ERR_PTR(-ENOMEM);
}
@@ -319,7 +314,5 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
alt->ops = &ucsi_displayport_ops;
typec_altmode_set_drvdata(alt, dp);
- mutex_unlock(&con->lock);
-
return alt;
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index affd024190c9..e680fcfdee60 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -146,40 +146,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, u64 command,
- void *data, size_t size)
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ void *data, size_t size)
{
u8 length;
int ret;
+ mutex_lock(&ucsi->ppm_lock);
+
ret = ucsi_exec_command(ucsi, command);
if (ret < 0)
- return ret;
+ goto out;
length = ret;
if (data) {
ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- return ret;
+ goto out;
}
ret = ucsi_acknowledge_command(ucsi);
if (ret)
- return ret;
-
- return length;
-}
-
-int ucsi_send_command(struct ucsi *ucsi, u64 command,
- void *retval, size_t size)
-{
- int ret;
+ goto out;
- mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, command, retval, size);
+ ret = length;
+out:
mutex_unlock(&ucsi->ppm_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_send_command);
@@ -205,7 +198,7 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
int i;
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
+ ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
@@ -354,7 +347,7 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+ len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
/*
* We are collecting all altmodes first and then registering.
* Some type-C devices will return zero length data beyond last
@@ -431,7 +424,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
+ len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
@@ -502,7 +495,7 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
command |= UCSI_GET_PDOS_SRC_PDOS;
- ret = ucsi_run_command(ucsi, command, con->src_pdos,
+ ret = ucsi_send_command(ucsi, command, con->src_pdos,
sizeof(con->src_pdos));
if (ret < 0) {
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
@@ -681,7 +674,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
*/
command = UCSI_GET_CAM_SUPPORTED;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ucsi_run_command(con->ucsi, command, NULL, 0);
+ ucsi_send_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
@@ -736,20 +729,24 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
u32 cci;
int ret;
+ mutex_lock(&ucsi->ppm_lock);
+
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
sizeof(command));
if (ret < 0)
- return ret;
+ goto out;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- if (time_is_before_jiffies(tmo))
- return -ETIMEDOUT;
+ if (time_is_before_jiffies(tmo)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- return ret;
+ goto out;
/* If the PPM is still doing something else, reset it again. */
if (cci & ~UCSI_CCI_RESET_COMPLETE) {
@@ -757,13 +754,15 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
&command,
sizeof(command));
if (ret < 0)
- return ret;
+ goto out;
}
msleep(20);
} while (!(cci & UCSI_CCI_RESET_COMPLETE));
- return 0;
+out:
+ mutex_unlock(&ucsi->ppm_lock);
+ return ret;
}
static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
@@ -775,9 +774,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
u64 c;
/* PPM most likely stopped responding. Resetting everything. */
- mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
- mutex_unlock(&con->ucsi->ppm_lock);
c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
ucsi_send_command(con->ucsi, c, NULL, 0);
@@ -901,12 +898,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num = index + 1;
con->ucsi = ucsi;
+ /* Delay other interactions with the con until registration is complete */
+ mutex_lock(&con->lock);
+
/* Get connector capability */
command = UCSI_GET_CONNECTOR_CAPABILITY;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
+ ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
- return ret;
+ goto out;
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
cap->data = TYPEC_PORT_DRD;
@@ -938,27 +938,32 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ret = ucsi_register_port_psy(con);
if (ret)
- return ret;
+ goto out;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
- if (IS_ERR(con->port))
- return PTR_ERR(con->port);
+ if (IS_ERR(con->port)) {
+ ret = PTR_ERR(con->port);
+ goto out;
+ }
/* Alternate modes */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
- if (ret)
+ if (ret) {
dev_err(ucsi->dev, "con%d: failed to register alt modes\n",
con->num);
+ goto out;
+ }
/* Get the status */
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(ucsi, command, &con->status,
- sizeof(con->status));
+ ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
- return 0;
+ ret = 0;
+ goto out;
}
+ ret = 0; /* ucsi_send_command() returns length on success */
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
@@ -983,17 +988,21 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
- if (ret)
+ if (ret) {
dev_err(ucsi->dev,
"con%d: failed to register alternate modes\n",
con->num);
- else
+ ret = 0;
+ } else {
ucsi_altmode_update_active(con);
+ }
}
trace_ucsi_register_port(con->num, &con->status);
- return 0;
+out:
+ mutex_unlock(&con->lock);
+ return ret;
}
/**
@@ -1009,8 +1018,6 @@ static int ucsi_init(struct ucsi *ucsi)
int ret;
int i;
- mutex_lock(&ucsi->ppm_lock);
-
/* Reset the PPM */
ret = ucsi_reset_ppm(ucsi);
if (ret) {
@@ -1021,13 +1028,13 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable basic notifications */
ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ret = ucsi_run_command(ucsi, command, NULL, 0);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
command = UCSI_GET_CAPABILITY;
- ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
+ ret = ucsi_send_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
@@ -1054,12 +1061,10 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable all notifications */
ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ret = ucsi_run_command(ucsi, command, NULL, 0);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
- mutex_unlock(&ucsi->ppm_lock);
-
return 0;
err_unregister:
@@ -1074,8 +1079,6 @@ err_unregister:
err_reset:
ucsi_reset_ppm(ucsi);
err:
- mutex_unlock(&ucsi->ppm_lock);
-
return ret;
}
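
After this refactor ucsi_send_command() and ucsi_reset_ppm() take the PPM lock themselves, and ucsi_register_port() holds the per-connector lock for the whole registration, which is why ucsi_register_displayport() above no longer touches con->lock. A caller-side sketch, assuming a hypothetical function name (the command macros are the ones used in the hunks):

#include "ucsi.h"

/* Illustrative only: command users no longer handle ppm_lock. */
static int example_read_status(struct ucsi_connector *con)
{
	u64 command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);

	/* Returns the MESSAGE_IN length on success, a negative errno on error. */
	return ucsi_send_command(con->ucsi, command, &con->status,
				 sizeof(con->status));
}
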
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 2305d425e6c9..9d7d642022d1 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -461,6 +461,11 @@ static void stub_disconnect(struct usb_device *udev)
return;
}
+static bool usbip_match(struct usb_device *udev)
+{
+ return true;
+}
+
#ifdef CONFIG_PM
/* These functions need usb_port_suspend and usb_port_resume,
@@ -486,6 +491,7 @@ struct usb_device_driver stub_driver = {
.name = "usbip-host",
.probe = stub_probe,
.disconnect = stub_disconnect,
+ .match = usbip_match,
#ifdef CONFIG_PM
.suspend = stub_suspend,
.resume = stub_resume,
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index d93a69b12f81..4271c408103e 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -29,4 +29,23 @@ config IFCVF
To compile this driver as a module, choose M here: the module will
be called ifcvf.
+config MLX5_VDPA
+ bool "MLX5 VDPA support library for ConnectX devices"
+ depends on MLX5_CORE
+ default n
+ help
+ Support library for Mellanox VDPA drivers. Provides code that is
+ common for all types of VDPA drivers. The following drivers are planned:
+ net, block.
+
+config MLX5_VDPA_NET
+ tristate "vDPA driver for ConnectX devices"
+ depends on MLX5_VDPA
+ default n
+ help
+ VDPA network driver for ConnectX6 and newer. Provides offloading
+ of virtio net datapath such that descriptors put on the ring will
+ be executed by the hardware. It also supports a variety of stateless
+ offloads depending on the actual device used and firmware version.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 8bbb686ca7a2..d160e9b63a66 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
obj-$(CONFIG_IFCVF) += ifcvf/
+obj-$(CONFIG_MLX5_VDPA) += mlx5/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 94bf0328b68d..f2a128e56de5 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw)
return 0;
}
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
struct ifcvf_lm_cfg __iomem *ifcvf_lm;
void __iomem *avail_idx_addr;
@@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
return last_avail_idx;
}
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
struct ifcvf_lm_cfg __iomem *ifcvf_lm;
void __iomem *avail_idx_addr;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index f4554412e607..64696d63fe07 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -29,7 +29,7 @@
(1ULL << VIRTIO_F_VERSION_1) | \
(1ULL << VIRTIO_NET_F_STATUS) | \
(1ULL << VIRTIO_F_ORDER_PLATFORM) | \
- (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
(1ULL << VIRTIO_NET_F_MRG_RXBUF))
/* Only one queue pair for now. */
@@ -84,7 +84,7 @@ struct ifcvf_hw {
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
-
+ unsigned int config_irq;
};
struct ifcvf_adapter {
@@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_features(struct ifcvf_hw *hw);
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index f5a60c14b979..8b4028556cb6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -50,9 +50,12 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
int i;
- for (i = 0; i < queues; i++)
+ for (i = 0; i < queues; i++) {
devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+ vf->vring[i].irq = -EINVAL;
+ }
+ devm_free_irq(&pdev->dev, vf->config_irq, vf);
ifcvf_free_irq_vectors(pdev);
}
@@ -72,10 +75,14 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
vector = 0;
- irq = pci_irq_vector(pdev, vector);
- ret = devm_request_irq(&pdev->dev, irq,
+ vf->config_irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, vf->config_irq,
ifcvf_config_changed, 0,
vf->config_msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request config irq\n");
+ return ret;
+ }
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
@@ -235,19 +242,21 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
return IFCVF_QUEUE_MAX;
}
-static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return ifcvf_get_vq_state(vf, qid);
+ state->avail_index = ifcvf_get_vq_state(vf, qid);
+ return 0;
}
static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
- u64 num)
+ const struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return ifcvf_set_vq_state(vf, qid, num);
+ return ifcvf_set_vq_state(vf, qid, state->avail_index);
}
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -352,6 +361,14 @@ static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
vf->config_cb.private = cb->private;
}
+static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
+ u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->vring[qid].irq;
+}
+
/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented
@@ -369,6 +386,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_vq_ready = ifcvf_vdpa_get_vq_ready,
.set_vq_num = ifcvf_vdpa_set_vq_num,
.set_vq_address = ifcvf_vdpa_set_vq_address,
+ .get_vq_irq = ifcvf_vdpa_get_vq_irq,
.kick_vq = ifcvf_vdpa_kick_vq,
.get_generation = ifcvf_vdpa_get_generation,
.get_device_id = ifcvf_vdpa_get_device_id,
@@ -384,7 +402,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *dev = &pdev->dev;
struct ifcvf_adapter *adapter;
struct ifcvf_hw *vf;
- int ret;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
@@ -420,7 +438,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops);
+ dev, &ifc_vdpa_ops,
+ IFCVF_MAX_QUEUE_PAIRS * 2);
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
@@ -441,6 +460,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ vf->vring[i].irq = -EINVAL;
+
ret = vdpa_register_device(&adapter->vdpa);
if (ret) {
IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile
new file mode 100644
index 000000000000..89a5bededc9f
--- /dev/null
+++ b/drivers/vdpa/mlx5/Makefile
@@ -0,0 +1,4 @@
+subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core
+
+obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o
+mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
new file mode 100644
index 000000000000..5c92a576edae
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_H__
+#define __MLX5_VDPA_H__
+
+#include <linux/vdpa.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_vdpa_direct_mr {
+ u64 start;
+ u64 end;
+ u32 perm;
+ struct mlx5_core_mkey mr;
+ struct sg_table sg_head;
+ int log_size;
+ int nsg;
+ struct list_head list;
+ u64 offset;
+};
+
+struct mlx5_vdpa_mr {
+ struct mlx5_core_mkey mkey;
+
+ /* list of direct MRs descendants of this indirect mr */
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+ struct mutex mkey_mtx;
+};
+
+struct mlx5_vdpa_resources {
+ u32 pdn;
+ struct mlx5_uars_page *uar;
+ void __iomem *kick_addr;
+ u16 uid;
+ u32 null_mkey;
+ bool valid;
+};
+
+struct mlx5_vdpa_dev {
+ struct vdpa_device vdev;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_vdpa_resources res;
+
+ u64 mlx_features;
+ u64 actual_features;
+ u8 status;
+ u32 max_vqs;
+ u32 generation;
+
+ struct mlx5_vdpa_mr mr;
+};
+
+int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
+int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+ int inlen);
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey);
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ bool *change_map);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
+
+#define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_info(__dev, format, ...) \
+ dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_dbg(__dev, format, ...) \
+	dev_dbg((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#endif /* __MLX5_VDPA_H__ */
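
The header above is the shared surface of the new mlx5 vDPA support library: resource setup (UID, PD, UAR and kick page), mkey and TIS/TIR/RQT creation, and the logging helpers. A probe-time sketch of the intended call order, assuming a hypothetical function name:

#include "mlx5_vdpa.h"

/* Illustrative only; error handling trimmed to the first step. */
static int example_setup(struct mlx5_vdpa_dev *mvdev)
{
	int err;

	err = mlx5_vdpa_alloc_resources(mvdev);	/* uid, pdn, uar, kick_addr */
	if (err) {
		mlx5_vdpa_warn(mvdev, "alloc resources: %d\n", err);
		return err;
	}

	/* ... create memory keys and virtqueue objects here ... */

	return 0;
}
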
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h
new file mode 100644
index 000000000000..f6f57a29b38e
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_IFC_H_
+#define __MLX5_VDPA_IFC_H_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+	MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, /* TODO: should this cap be checked? */
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+#endif /* __MLX5_VDPA_IFC_H_ */
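
The layouts above are PRM-style bit descriptions consumed through the standard mlx5 MLX5_SET()/MLX5_SET64()/MLX5_ADDR_OF() accessors from linux/mlx5/device.h. A sketch of filling the virtio_q context of a CREATE_VIRTIO_NET_Q command, assuming a hypothetical helper and parameters:

#include <linux/mlx5/device.h>
#include "mlx5_vdpa_ifc.h"

/* Illustrative only: "in" points at a zeroed create_virtio_net_q_in buffer. */
static void fill_virtio_q_ctx(void *in, u16 num_ent, u32 msix_vector,
			      u64 desc_addr, u64 used_addr, u64 avail_addr)
{
	void *obj = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
	void *vq = MLX5_ADDR_OF(virtio_net_q_object, obj, virtio_q_context);

	MLX5_SET(virtio_q, vq, virtio_q_type,
		 MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT);
	MLX5_SET(virtio_q, vq, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE);
	MLX5_SET(virtio_q, vq, event_qpn_or_msix, msix_vector);
	MLX5_SET(virtio_q, vq, queue_size, num_ent);
	MLX5_SET64(virtio_q, vq, desc_addr, desc_addr);
	MLX5_SET64(virtio_q, vq, used_addr, used_addr);
	MLX5_SET64(virtio_q, vq, available_addr, avail_addr);
}
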
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
new file mode 100644
index 000000000000..ef1c550f8266
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <linux/gcd.h>
+#include <linux/string.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+/* DIV_ROUND_UP where the divider is a power of 2 given by its log base 2 value */
+#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
+({ \
+ u64 __s = _s; \
+ u64 _res; \
+ _res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
+ _res; \
+})
+
+static int get_octo_len(u64 len, int page_shift)
+{
+ u64 page_size = 1ULL << page_shift;
+ int npages;
+
+ npages = ALIGN(len, page_size) >> page_shift;
+ return (npages + 1) / 2;
+}
+
+static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
+{
+ struct scatterlist *sg;
+ __be64 *pas;
+ int i;
+
+ pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+ (*pas) = cpu_to_be64(sg_dma_address(sg));
+}
+
+static void mlx5_set_access_mode(void *mkc, int mode)
+{
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
+}
+
+static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+ mtt[i] = cpu_to_be64(sg_dma_address(sg));
+}
+
+static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ int inlen;
+ void *mkc;
+ void *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+ fill_sg(mr, in);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
+ MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
+ mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET64(mkc, mkc, start_addr, mr->offset);
+ MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
+ MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(mr->end - mr->start, mr->log_size));
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(mr->end - mr->start, mr->log_size));
+ populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
+ kvfree(in);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
+}
+
+static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return max_t(u64, map->start, mr->start);
+}
+
+static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return min_t(u64, map->last + 1, mr->end);
+}
+
+static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return map_end(map, mr) - map_start(map, mr);
+}
+
+#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
+#define MLX5_VDPA_INVALID_LEN ((u64)-1)
+
+static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
+{
+ struct mlx5_vdpa_direct_mr *s;
+
+ s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+ if (!s)
+ return MLX5_VDPA_INVALID_START_ADDR;
+
+ return s->start;
+}
+
+static u64 indir_len(struct mlx5_vdpa_mr *mkey)
+{
+ struct mlx5_vdpa_direct_mr *s;
+ struct mlx5_vdpa_direct_mr *e;
+
+ s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+ if (!s)
+ return MLX5_VDPA_INVALID_LEN;
+
+ e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+
+ return e->end - s->start;
+}
+
+#define LOG_MAX_KLM_SIZE 30
+#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)
+
+static u32 klm_bcount(u64 size)
+{
+ return (u32)size;
+}
+
+static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
+{
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_klm *klmarr;
+ struct mlx5_klm *klm;
+ bool first = true;
+ u64 preve;
+ int i;
+
+ klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ i = 0;
+ list_for_each_entry(dmr, &mkey->head, list) {
+again:
+ klm = &klmarr[i++];
+ if (first) {
+ preve = dmr->start;
+ first = false;
+ }
+
+ if (preve == dmr->start) {
+ klm->key = cpu_to_be32(dmr->mr.key);
+ klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
+ preve = dmr->end;
+ } else {
+ klm->key = cpu_to_be32(mvdev->res.null_mkey);
+ klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
+ preve = dmr->start;
+ goto again;
+ }
+ }
+}
+
+static int klm_byte_size(int nklms)
+{
+ return 16 * ALIGN(nklms, 4);
+}
+
+static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ int inlen;
+ void *mkc;
+ void *in;
+ int err;
+ u64 start;
+ u64 len;
+
+ start = indir_start_addr(mr);
+ len = indir_len(mr);
+ if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
+ return -EINVAL;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET64(mkc, mkc, start_addr, start);
+ MLX5_SET64(mkc, mkc, len, len);
+ MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
+ fill_indir(mvdev, mr, in);
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+ kfree(in);
+ return err;
+}
+
+static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
+}
+
+static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
+ struct vhost_iotlb *iotlb)
+{
+ struct vhost_iotlb_map *map;
+ unsigned long lgcd = 0;
+ int log_entity_size;
+ unsigned long size;
+ u64 start = 0;
+ int err;
+ struct page *pg;
+ unsigned int nsg;
+ int sglen;
+ u64 pa;
+ u64 paend;
+ struct scatterlist *sg;
+ struct device *dma = mvdev->mdev->device;
+ int ret;
+
+ for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
+ size = maplen(map, mr);
+ lgcd = gcd(lgcd, size);
+ start += size;
+ }
+ log_entity_size = ilog2(lgcd);
+
+ sglen = 1 << log_entity_size;
+ nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);
+
+ err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
+ if (err)
+ return err;
+
+ sg = mr->sg_head.sgl;
+ for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+ paend = map->addr + maplen(map, mr);
+ for (pa = map->addr; pa < paend; pa += sglen) {
+ pg = pfn_to_page(__phys_to_pfn(pa));
+ if (!sg) {
+ mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
+ map->start, map->last + 1);
+ err = -ENOMEM;
+ goto err_map;
+ }
+ sg_set_page(sg, pg, sglen, 0);
+ sg = sg_next(sg);
+ if (!sg)
+ goto done;
+ }
+ }
+done:
+ mr->log_size = log_entity_size;
+ mr->nsg = nsg;
+ ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!ret)
+ goto err_map;
+
+ err = create_direct_mr(mvdev, mr);
+ if (err)
+ goto err_direct;
+
+ return 0;
+
+err_direct:
+ dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+err_map:
+ sg_free_table(&mr->sg_head);
+ return err;
+}
+
+static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ struct device *dma = mvdev->mdev->device;
+
+ destroy_direct_mr(mvdev, mr);
+ dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&mr->sg_head);
+}
+
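+/* Split the range [start, start + size) into chunks of at most MAX_KLM_SIZE
+ * bytes, create a direct mkey for each chunk and link the chunks on the
+ * memory region's list.
+ */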
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+ LIST_HEAD(tmp);
+ u64 st;
+ u64 sz;
+ int err;
+ int i = 0;
+
+ st = start;
+ while (size) {
+ sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
+ dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
+ if (!dmr) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ dmr->start = st;
+ dmr->end = st + sz;
+ dmr->perm = perm;
+ err = map_direct_mr(mvdev, dmr, iotlb);
+ if (err) {
+ kfree(dmr);
+ goto err_alloc;
+ }
+
+ list_add_tail(&dmr->list, &tmp);
+ size -= sz;
+ mr->num_directs++;
+ mr->num_klms++;
+ st += sz;
+ i++;
+ }
+ list_splice_tail(&tmp, &mr->head);
+ return 0;
+
+err_alloc:
+ list_for_each_entry_safe(dmr, n, &mr->head, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ return err;
+}
+
+/* The iotlb pointer contains a list of maps. Go over the maps, possibly
+ * merging mergeable maps, and create direct memory keys that provide the
+ * device access to memory. The direct mkeys are then referred to by the
+ * indirect memory key that provides access to the entire address space given
+ * by iotlb.
+ */
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+ struct vhost_iotlb_map *map;
+ u32 pperm = U16_MAX;
+ u64 last = U64_MAX;
+ u64 ps = U64_MAX;
+ u64 pe = U64_MAX;
+ u64 start = 0;
+ int err = 0;
+ int nnuls;
+
+ if (mr->initialized)
+ return 0;
+
+ INIT_LIST_HEAD(&mr->head);
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ start = map->start;
+ if (pe == map->start && pperm == map->perm) {
+ pe = map->last + 1;
+ } else {
+ if (ps != U64_MAX) {
+ if (pe < map->start) {
+ /* We have a hole in the map. Check how
+ * many null keys are required to fill it.
+ */
+ nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
+ LOG_MAX_KLM_SIZE);
+ mr->num_klms += nnuls;
+ }
+ err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ if (err)
+ goto err_chain;
+ }
+ ps = map->start;
+ pe = map->last + 1;
+ pperm = map->perm;
+ }
+ }
+ err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ if (err)
+ goto err_chain;
+
+ /* Create the memory key that defines the guest's address space. This
+ * memory key refers to the direct keys that contain the MTT
+ * translations.
+ */
+ err = create_indirect_key(mvdev, mr);
+ if (err)
+ goto err_chain;
+
+ mr->initialized = true;
+ return 0;
+
+err_chain:
+ list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+ mutex_lock(&mr->mkey_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mr->mkey_mtx);
+ return err;
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+
+ mutex_lock(&mr->mkey_mtx);
+ if (!mr->initialized)
+ goto out;
+
+ destroy_indirect_key(mvdev, mr);
+ list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ memset(mr, 0, sizeof(*mr));
+ mr->initialized = false;
+out:
+ mutex_unlock(&mr->mkey_mtx);
+}
+
+static bool map_empty(struct vhost_iotlb *iotlb)
+{
+ return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
+}
+
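+/* Handle a memory map update from the vdpa core. An empty iotlb destroys the
+ * current memory key. If no memory key exists yet, create one; otherwise only
+ * signal through *change_map that the existing map must be replaced.
+ */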
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ bool *change_map)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err = 0;
+
+ *change_map = false;
+ if (map_empty(iotlb)) {
+ mlx5_vdpa_destroy_mr(mvdev);
+ return 0;
+ }
+ mutex_lock(&mr->mkey_mtx);
+ if (mr->initialized) {
+ mlx5_vdpa_info(mvdev, "memory map update\n");
+ *change_map = true;
+ }
+ if (!*change_map)
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mr->mkey_mtx);
+
+ return err;
+}
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
new file mode 100644
index 000000000000..96e6421c5d1c
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_vdpa.h"
+
+static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+
+ err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
+ if (!err)
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
+
+ return err;
+}
+
+static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
+}
+
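+/* Query the device's null mkey. It is referenced by the indirect mkey to
+ * cover holes in the address space.
+ */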
+static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
+ if (!err)
+ *null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
+ return err;
+}
+
+static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
+{
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ int inlen;
+ void *in;
+ int err;
+
+ /* 0 means not supported */
+ if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
+
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (!err)
+ *uid = MLX5_GET(create_uctx_out, out, uid);
+
+ return err;
+}
+
+static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+ MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
+ err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
+}
+
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
+}
+
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
+}
+
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
+
+ err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
+
+ return err;
+}
+
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
+}
+
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+ int inlen)
+{
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
+ u32 mkey_index;
+ void *mkc;
+ int err;
+
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
+ if (err)
+ return err;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
+ return 0;
+}
+
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
+
+ MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
+}
+
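+/* Allocate the resources shared by the whole device: a UAR page, a user
+ * context, a protection domain, the null mkey and a mapping of the doorbell
+ * (kick) page exposed by the device.
+ */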
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
+ struct mlx5_vdpa_resources *res = &mvdev->res;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
+ u64 kick_addr;
+ int err;
+
+ if (res->valid) {
+ mlx5_vdpa_warn(mvdev, "resources already allocated\n");
+ return -EINVAL;
+ }
+ mutex_init(&mvdev->mr.mkey_mtx);
+ res->uar = mlx5_get_uars_page(mdev);
+ if (IS_ERR(res->uar)) {
+ err = PTR_ERR(res->uar);
+ goto err_uars;
+ }
+
+ err = create_uctx(mvdev, &res->uid);
+ if (err)
+ goto err_uctx;
+
+ err = alloc_pd(mvdev, &res->pdn, res->uid);
+ if (err)
+ goto err_pd;
+
+ err = get_null_mkey(mvdev, &res->null_mkey);
+ if (err)
+ goto err_key;
+
+ kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+ res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
+ if (!res->kick_addr) {
+ err = -ENOMEM;
+ goto err_key;
+ }
+ res->valid = true;
+
+ return 0;
+
+err_key:
+ dealloc_pd(mvdev, res->pdn, res->uid);
+err_pd:
+ destroy_uctx(mvdev, res->uid);
+err_uctx:
+ mlx5_put_uars_page(mdev, res->uar);
+err_uars:
+ mutex_destroy(&mvdev->mr.mkey_mtx);
+ return err;
+}
+
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_resources *res = &mvdev->res;
+
+ if (!res->valid)
+ return;
+
+ iounmap(res->kick_addr);
+ res->kick_addr = NULL;
+ dealloc_pd(mvdev, res->pdn, res->uid);
+ destroy_uctx(mvdev, res->uid);
+ mlx5_put_uars_page(mvdev->mdev, res->uar);
+ mutex_destroy(&mvdev->mr.mkey_mtx);
+ res->valid = false;
+}
diff --git a/drivers/vdpa/mlx5/net/main.c b/drivers/vdpa/mlx5/net/main.c
new file mode 100644
index 000000000000..838cd98386ff
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/main.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vnet.h"
+
+MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox VDPA driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static bool required_caps_supported(struct mlx5_core_dev *mdev)
+{
+ u8 event_mode;
+ u64 got;
+
+ got = MLX5_CAP_GEN_64(mdev, general_obj_types);
+
+ if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
+ return false;
+
+ event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode);
+ if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
+ return false;
+
+ if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type))
+ return false;
+
+ return true;
+}
+
+static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vdpa_dev *vdev;
+
+ if (mlx5_core_is_pf(mdev))
+ return NULL;
+
+ if (!required_caps_supported(mdev)) {
+ dev_info(mdev->device, "virtio net emulation not supported\n");
+ return NULL;
+ }
+ vdev = mlx5_vdpa_add_dev(mdev);
+ if (IS_ERR(vdev))
+ return NULL;
+
+ return vdev;
+}
+
+static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context)
+{
+ struct mlx5_vdpa_dev *vdev = context;
+
+ mlx5_vdpa_remove_dev(vdev);
+}
+
+static struct mlx5_interface mlx5_vdpa_interface = {
+ .add = mlx5_vdpa_add,
+ .remove = mlx5_vdpa_remove,
+ .protocol = MLX5_INTERFACE_PROTOCOL_VDPA,
+};
+
+static int __init mlx5_vdpa_init(void)
+{
+ return mlx5_register_interface(&mlx5_vdpa_interface);
+}
+
+static void __exit mlx5_vdpa_exit(void)
+{
+ mlx5_unregister_interface(&mlx5_vdpa_interface);
+}
+
+module_init(mlx5_vdpa_init);
+module_exit(mlx5_vdpa_exit);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
new file mode 100644
index 000000000000..70676a6d1691
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -0,0 +1,1974 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_vnet.h"
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vdpa.h"
+
+#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
+
+#define VALID_FEATURES_MASK \
+ (BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_MAC) | \
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | \
+ BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | \
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) | BIT_ULL(VIRTIO_NET_F_HOST_UFO) | \
+ BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+ BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | \
+ BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | \
+ BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) | \
+ BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | \
+ BIT_ULL(VIRTIO_F_RING_PACKED) | BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV))
+
+#define VALID_STATUS_MASK \
+ (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
+ VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+
+struct mlx5_vdpa_net_resources {
+ u32 tisn;
+ u32 tdn;
+ u32 tirn;
+ u32 rqtn;
+ bool valid;
+};
+
+struct mlx5_vdpa_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int cqe_size;
+ int nent;
+};
+
+struct mlx5_vdpa_cq {
+ struct mlx5_core_cq mcq;
+ struct mlx5_vdpa_cq_buf buf;
+ struct mlx5_db db;
+ int cqe;
+};
+
+struct mlx5_vdpa_umem {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int size;
+ u32 id;
+};
+
+struct mlx5_vdpa_qp {
+ struct mlx5_core_qp mqp;
+ struct mlx5_frag_buf frag_buf;
+ struct mlx5_db db;
+ u16 head;
+ bool fw;
+};
+
+struct mlx5_vq_restore_info {
+ u32 num_ent;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u16 avail_index;
+ bool ready;
+ struct vdpa_callback cb;
+ bool restore;
+};
+
+struct mlx5_vdpa_virtqueue {
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num_ent;
+ struct vdpa_callback event_cb;
+
+ /* Resources for implementing the notification channel from the device
+ * to the driver. fwqp is the firmware end of an RC connection; the
+ * other end is vqqp used by the driver. cq is where completions are
+ * reported.
+ */
+ struct mlx5_vdpa_cq cq;
+ struct mlx5_vdpa_qp fwqp;
+ struct mlx5_vdpa_qp vqqp;
+
+ /* umem resources are required for the virtqueue operation. Their use
+ * is internal to the device and they must be provided by the driver.
+ */
+ struct mlx5_vdpa_umem umem1;
+ struct mlx5_vdpa_umem umem2;
+ struct mlx5_vdpa_umem umem3;
+
+ bool initialized;
+ int index;
+ u32 virtq_id;
+ struct mlx5_vdpa_net *ndev;
+ u16 avail_idx;
+ int fw_state;
+
+ /* keep last in the struct */
+ struct mlx5_vq_restore_info ri;
+};
+
+/* We will remove this limitation once mlx5_vdpa_alloc_resources()
+ * provides for driver space allocation
+ */
+#define MLX5_MAX_SUPPORTED_VQS 16
+
+struct mlx5_vdpa_net {
+ struct mlx5_vdpa_dev mvdev;
+ struct mlx5_vdpa_net_resources res;
+ struct virtio_net_config config;
+ struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+
+ /* Serialize vq resources creation and destruction. This is required
+ * since the memory map might change and we need to destroy and create
+ * resources while the driver is operational.
+ */
+ struct mutex reslock;
+ struct mlx5_flow_table *rxft;
+ struct mlx5_fc *rx_counter;
+ struct mlx5_flow_handle *rx_rule;
+ bool setup;
+ u16 mtu;
+};
+
+static void free_resources(struct mlx5_vdpa_net *ndev);
+static void init_mvqs(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_net *ndev);
+static void teardown_driver(struct mlx5_vdpa_net *ndev);
+
+static bool mlx5_vdpa_debug;
+
+#define MLX5_LOG_VIO_FLAG(_feature) \
+ do { \
+ if (features & BIT_ULL(_feature)) \
+ mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
+ } while (0)
+
+#define MLX5_LOG_VIO_STAT(_status) \
+ do { \
+ if (status & (_status)) \
+ mlx5_vdpa_info(mvdev, "%s\n", #_status); \
+ } while (0)
+
+static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
+{
+ if (status & ~VALID_STATUS_MASK)
+ mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
+ status & ~VALID_STATUS_MASK);
+
+ if (!mlx5_vdpa_debug)
+ return;
+
+ mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
+ if (set && !status) {
+ mlx5_vdpa_info(mvdev, "driver resets the device\n");
+ return;
+ }
+
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
+}
+
+static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
+{
+ if (features & ~VALID_FEATURES_MASK)
+ mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
+ features & ~VALID_FEATURES_MASK);
+
+ if (!mlx5_vdpa_debug)
+ return;
+
+ mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
+ if (!features)
+ mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
+
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
+}
+
+static int create_tis(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+ int err;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
+ err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
+ if (err)
+ mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
+
+ return err;
+}
+
+static void destroy_tis(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
+}
+
+#define MLX5_VDPA_CQE_SIZE 64
+#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
+
+static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
+ u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
+ buf->cqe_size = MLX5_VDPA_CQE_SIZE;
+ buf->nent = nent;
+
+ return 0;
+}
+
+static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
+{
+ struct mlx5_frag_buf *frag_buf = &umem->frag_buf;
+
+ return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
+}
+
+static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
+}
+
+static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
+{
+ struct mlx5_cqe64 *cqe64;
+ void *cqe;
+ int i;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = get_cqe(vcq, i);
+ cqe64 = cqe;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
+{
+ struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
+
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
+ return cqe64;
+
+ return NULL;
+}
+
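+/* Post n receive credits on the driver QP by advancing the head and updating
+ * the doorbell record.
+ */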
+static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
+{
+ vqp->head += n;
+ vqp->db.db[0] = cpu_to_be32(vqp->head);
+}
+
+static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
+ struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
+{
+ struct mlx5_vdpa_qp *vqp;
+ __be64 *pas;
+ void *qpc;
+
+ vqp = fw ? &mvq->fwqp : &mvq->vqqp;
+ MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ if (vqp->fw) {
+ /* The firmware QP is allocated by the driver on behalf of the firmware,
+ * so we can skip some of the parameters as they will be chosen by the
+ * firmware.
+ */
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ return;
+ }
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
+ MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
+ mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
+}
+
+static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
+{
+ return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
+ num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
+}
+
+static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_vdpa_qp *vqp)
+{
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ void *qpc;
+ void *in;
+ int err;
+
+ if (!vqp->fw) {
+ vqp = &mvq->vqqp;
+ err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
+ if (err)
+ return err;
+
+ err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
+ if (err)
+ goto err_db;
+ inlen += vqp->frag_buf.npages * sizeof(__be64);
+ }
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ if (!vqp->fw)
+ MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (err)
+ goto err_kzalloc;
+
+ vqp->mqp.uid = ndev->mvdev.res.uid;
+ vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ if (!vqp->fw)
+ rx_post(vqp, mvq->num_ent);
+
+ return 0;
+
+err_kzalloc:
+ if (!vqp->fw)
+ mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+err_db:
+ if (!vqp->fw)
+ rq_buf_free(ndev, vqp);
+
+ return err;
+}
+
+static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
+ MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
+ if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
+ if (!vqp->fw) {
+ mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+ rq_buf_free(ndev, vqp);
+ }
+}
+
+static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
+{
+ return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
+{
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = next_cqe_sw(vcq);
+ if (!cqe64)
+ return -EAGAIN;
+
+ vcq->mcq.cons_index++;
+ return 0;
+}
+
+static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ mlx5_cq_set_ci(&mvq->cq.mcq);
+ rx_post(&mvq->vqqp, num);
+ if (mvq->event_cb.callback)
+ mvq->event_cb.callback(mvq->event_cb.private);
+}
+
+static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
+{
+ struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
+ struct mlx5_vdpa_net *ndev = mvq->ndev;
+ void __iomem *uar_page = ndev->mvdev.res.uar->map;
+ int num = 0;
+
+ while (!mlx5_vdpa_poll_one(&mvq->cq)) {
+ num++;
+ if (num > mvq->num_ent / 2) {
+ /* If completions keep coming while we poll, we want to
+ * let the hardware know that we consumed them by
+ * updating the doorbell record. We also let vdpa core
+ * know about this so it passes it on to the virtio driver
+ * in the guest.
+ */
+ mlx5_vdpa_handle_completions(mvq, num);
+ num = 0;
+ }
+ }
+
+ if (num)
+ mlx5_vdpa_handle_completions(mvq, num);
+
+ mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+}
+
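+/* Create the completion queue through which the firmware QP reports
+ * notifications for this virtqueue.
+ */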
+static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
+{
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ void __iomem *uar_page = ndev->mvdev.res.uar->map;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_vdpa_cq *vcq = &mvq->cq;
+ unsigned int irqn;
+ __be64 *pas;
+ int inlen;
+ void *cqc;
+ void *in;
+ int err;
+ int eqn;
+
+ err = mlx5_db_alloc(mdev, &vcq->db);
+ if (err)
+ return err;
+
+ vcq->mcq.set_ci_db = vcq->db.db;
+ vcq->mcq.arm_db = vcq->db.db + 1;
+ vcq->mcq.cqe_sz = 64;
+
+ err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
+ if (err)
+ goto err_db;
+
+ cq_frag_buf_init(vcq, &vcq->buf);
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_vzalloc;
+ }
+
+ MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
+ /* Use vector 0 by default. Consider adding code to choose least used
+ * vector.
+ */
+ err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+ if (err)
+ goto err_vec;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
+ MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
+
+ err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_vec;
+
+ vcq->mcq.comp = mlx5_vdpa_cq_comp;
+ vcq->cqe = num_ent;
+ vcq->mcq.set_ci_db = vcq->db.db;
+ vcq->mcq.arm_db = vcq->db.db + 1;
+ mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+ kfree(in);
+ return 0;
+
+err_vec:
+ kfree(in);
+err_vzalloc:
+ cq_frag_buf_free(ndev, &vcq->buf);
+err_db:
+ mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+ return err;
+}
+
+static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+{
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ struct mlx5_vdpa_cq *vcq = &mvq->cq;
+
+ if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
+ return;
+ }
+ cq_frag_buf_free(ndev, &vcq->buf);
+ mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+}
+
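+/* The required size of each umem is a linear function of the queue size; the
+ * per-umem coefficients are reported in the device capabilities.
+ */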
+static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ struct mlx5_vdpa_umem **umemp)
+{
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int p_a;
+ int p_b;
+
+ switch (num) {
+ case 1:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+ *umemp = &mvq->umem1;
+ break;
+ case 2:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+ *umemp = &mvq->umem2;
+ break;
+ case 3:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+ *umemp = &mvq->umem3;
+ break;
+ }
+ return p_a * mvq->num_ent + p_b;
+}
+
+static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
+}
+
+static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ int inlen;
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
+ void *um;
+ void *in;
+ int err;
+ __be64 *pas;
+ int size;
+ struct mlx5_vdpa_umem *umem;
+
+ size = umem_size(ndev, mvq, num, &umem);
+ if (size < 0)
+ return size;
+
+ umem->size = size;
+ err = umem_frag_buf_alloc(ndev, umem, size);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
+ MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
+ um = MLX5_ADDR_OF(create_umem_in, in, umem);
+ MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);
+
+ pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
+ mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
+ goto err_cmd;
+ }
+
+ kfree(in);
+ umem->id = MLX5_GET(create_umem_out, out, umem_id);
+
+ return 0;
+
+err_cmd:
+ kfree(in);
+err_in:
+ umem_frag_buf_free(ndev, umem);
+ return err;
+}
+
+static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
+ struct mlx5_vdpa_umem *umem;
+
+ switch (num) {
+ case 1:
+ umem = &mvq->umem1;
+ break;
+ case 2:
+ umem = &mvq->umem2;
+ break;
+ case 3:
+ umem = &mvq->umem3;
+ break;
+ }
+
+ MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+ return;
+
+ umem_frag_buf_free(ndev, umem);
+}
+
+static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int num;
+ int err;
+
+ for (num = 1; num <= 3; num++) {
+ err = create_umem(ndev, mvq, num);
+ if (err)
+ goto err_umem;
+ }
+ return 0;
+
+err_umem:
+ for (num--; num > 0; num--)
+ umem_destroy(ndev, mvq, num);
+
+ return err;
+}
+
+static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int num;
+
+ for (num = 3; num > 0; num--)
+ umem_destroy(ndev, mvq, num);
+}
+
+static int get_queue_type(struct mlx5_vdpa_net *ndev)
+{
+ u32 type_mask;
+
+ type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
+
+ /* prefer split queue */
+ if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+
+ WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
+
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+}
+
+static bool vq_is_tx(u16 idx)
+{
+ return idx % 2;
+}
+
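+/* Pack the negotiated offload features into the queue_feature_bit_mask_12_3
+ * field of the virtqueue object.
+ */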
+static u16 get_features_12_3(u64 features)
+{
+ return (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << 9) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << 8) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << 7) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
+}
+
+static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
+ u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+ void *vq_ctx;
+ void *in;
+ int err;
+
+ err = umems_create(ndev, mvq);
+ if (err)
+ return err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
+ get_features_12_3(ndev->mvdev.actual_features));
+ vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));
+
+ if (vq_is_tx(mvq->index))
+ MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
+
+ MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
+ MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
+ MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
+ MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
+ MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
+ !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
+ MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+ MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+ MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
+ MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
+ MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
+ MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_cmd;
+
+ kfree(in);
+ mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+
+err_cmd:
+ kfree(in);
+err_alloc:
+ umems_destroy(ndev, mvq);
+ return err;
+}
+
+static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
+
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
+ return;
+ }
+ umems_destroy(ndev, mvq);
+}
+
+static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+ return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
+}
+
+static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+ return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
+}
+
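+/* Allocate and fill the command mailboxes for the QP state transition given
+ * by cmd. On failure, both *in and *out are set to NULL.
+ */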
+static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
+ int *outlen, u32 qpn, u32 rqpn)
+{
+ void *qpc;
+ void *pp;
+
+ switch (cmd) {
+ case MLX5_CMD_OP_2RST_QP:
+ *inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
+ *outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(*outlen, GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(qp_2rst_in, *in, opcode, cmd);
+ MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(qp_2rst_in, *in, qpn, qpn);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ *inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
+ MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, vhca_port_num, 1);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ *inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
+ MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ MLX5_SET(qpc, qpc, log_msg_max, 30);
+ MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, fl, 1);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ *inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
+ MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, *in, qpc);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, ack_timeout, 14);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7);
+ break;
+ default:
+ goto outerr_nullify;
+ }
+
+ return;
+
+outerr:
+ kfree(*in);
+ kfree(*out);
+outerr_nullify:
+ *in = NULL;
+ *out = NULL;
+}
+
+static void free_inout(void *in, void *out)
+{
+ kfree(in);
+ kfree(out);
+}
+
+/* Two QPs are used by each virtqueue. One is used by the driver and one by
+ * the firmware. The fw argument indicates whether the QP being modified is the
+ * one used by the firmware.
+ */
+static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
+{
+ int outlen;
+ int inlen;
+ void *out;
+ void *in;
+ int err;
+
+ alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
+ if (!in || !out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
+ free_inout(in, out);
+ return err;
+}
+
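+/* Reset both QPs, move them to RTR and then move the firmware QP to RTS so it
+ * can post to the driver QP.
+ */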
+static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
+ if (err)
+ return err;
+
+ return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
+}
+
+struct mlx5_virtq_attr {
+ u8 state;
+ u16 available_index;
+};
+
+static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtq_attr *attr)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
+ u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
+ void *out;
+ void *obj_context;
+ void *cmd_hdr;
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
+ if (err)
+ goto err_cmd;
+
+ obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
+ memset(attr, 0, sizeof(*attr));
+ attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
+ attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ kfree(out);
+ return 0;
+
+err_cmd:
+ kfree(out);
+ return err;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
+ u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+ void *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
+ MLX5_VIRTQ_MODIFY_MASK_STATE);
+ MLX5_SET(virtio_net_q_object, obj_context, state, state);
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (!err)
+ mvq->fw_state = state;
+
+ return err;
+}
+
+static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u16 idx = mvq->index;
+ int err;
+
+ if (!mvq->num_ent)
+ return 0;
+
+ if (mvq->initialized) {
+ mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
+ return -EINVAL;
+ }
+
+ err = cq_create(ndev, idx, mvq->num_ent);
+ if (err)
+ return err;
+
+ err = qp_create(ndev, mvq, &mvq->fwqp);
+ if (err)
+ goto err_fwqp;
+
+ err = qp_create(ndev, mvq, &mvq->vqqp);
+ if (err)
+ goto err_vqqp;
+
+ err = connect_qps(ndev, mvq);
+ if (err)
+ goto err_connect;
+
+ err = create_virtqueue(ndev, mvq);
+ if (err)
+ goto err_connect;
+
+ if (mvq->ready) {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
+ idx, err);
+ goto err_connect;
+ }
+ }
+
+ mvq->initialized = true;
+ return 0;
+
+err_connect:
+ qp_destroy(ndev, &mvq->vqqp);
+err_vqqp:
+ qp_destroy(ndev, &mvq->fwqp);
+err_fwqp:
+ cq_destroy(ndev, idx);
+ return err;
+}
+
+static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_virtq_attr attr;
+
+ if (!mvq->initialized)
+ return;
+
+ if (query_virtqueue(ndev, mvq, &attr)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
+ return;
+ }
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return;
+
+ if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+ mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
+}
+
+static void suspend_vqs(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+ suspend_vq(ndev, &ndev->vqs[i]);
+}
+
+static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ if (!mvq->initialized)
+ return;
+
+ suspend_vq(ndev, mvq);
+ destroy_virtqueue(ndev, mvq);
+ qp_destroy(ndev, &mvq->vqqp);
+ qp_destroy(ndev, &mvq->fwqp);
+ cq_destroy(ndev, mvq->index);
+ mvq->initialized = false;
+}
+
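+/* Create an RQT listing the receive (even-indexed) virtqueues. The TIR created
+ * below uses it as its indirection table.
+ */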
+static int create_rqt(struct mlx5_vdpa_net *ndev)
+{
+ int log_max_rqt;
+ __be32 *list;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+ log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (log_max_rqt < 1)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+ for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+ if (!ndev->vqs[j].initialized)
+ continue;
+
+ if (!vq_is_tx(ndev->vqs[j].index)) {
+ list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+ i++;
+ }
+ }
+
+ err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
+ kfree(in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void destroy_rqt(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
+}
+
+static int create_tir(struct mlx5_vdpa_net *ndev)
+{
+#define HASH_IP_L4PORTS \
+ (MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+ static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
+ void *rss_key;
+ void *outer;
+ void *tirc;
+ void *in;
+ int err;
+
+ in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));
+
+ outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);
+
+ MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
+ MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);
+
+ err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
+ kfree(in);
+ return err;
+}
+
+static void destroy_tir(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
+}
+
+static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ int err;
+
+ /* for now, one entry, match all, forward to tir */
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns) {
+ mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ndev->rxft))
+ return PTR_ERR(ndev->rxft);
+
+ ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
+ if (IS_ERR(ndev->rx_counter)) {
+ err = PTR_ERR(ndev->rx_counter);
+ goto err_fc;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[0].tir_num = ndev->res.tirn;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
+ ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
+ if (IS_ERR(ndev->rx_rule)) {
+ err = PTR_ERR(ndev->rx_rule);
+ ndev->rx_rule = NULL;
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+err_fc:
+ mlx5_destroy_flow_table(ndev->rxft);
+ return err;
+}
+
+static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+ if (!ndev->rx_rule)
+ return;
+
+ mlx5_del_flow_rules(ndev->rx_rule);
+ mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+ mlx5_destroy_flow_table(ndev->rxft);
+
+ ndev->rx_rule = NULL;
+}
+
+static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (unlikely(!mvq->ready))
+ return;
+
+ iowrite16(idx, ndev->mvdev.res.kick_addr);
+}
+
+static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
+ u64 driver_area, u64 device_area)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ mvq->desc_addr = desc_area;
+ mvq->device_addr = device_area;
+ mvq->driver_addr = driver_area;
+ return 0;
+}
+
+static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ mvq = &ndev->vqs[idx];
+ mvq->num_ent = num;
+}
+
+static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
+
+ vq->event_cb = *cb;
+}
+
+static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (!ready)
+ suspend_vq(ndev, mvq);
+
+ mvq->ready = ready;
+}
+
+static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ return mvq->ready;
+}
+
+static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
+ const struct vdpa_vq_state *state)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
+ mlx5_vdpa_warn(mvdev, "can't modify available index\n");
+ return -EINVAL;
+ }
+
+ mvq->avail_idx = state->avail_index;
+ return 0;
+}
+
+static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_virtq_attr attr;
+ int err;
+
+ if (!mvq->initialized)
+ return -EAGAIN;
+
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ return err;
+ }
+ state->avail_index = attr.available_index;
+ return 0;
+}
+
+static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
+{
+ return PAGE_SIZE;
+}
+
+enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
+ MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
+ MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
+ MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
+};
+
+static u64 mlx_to_virtio_features(u16 dev_features)
+{
+ u64 result = 0;
+
+ if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
+ result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM);
+ if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
+ result |= BIT_ULL(VIRTIO_NET_F_CSUM);
+ if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
+ result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6);
+ if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
+ result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO4);
+
+ return result;
+}
+
+static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ u16 dev_features;
+
+ dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
+ ndev->mvdev.mlx_features = mlx_to_virtio_features(dev_features);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ print_features(mvdev, ndev->mvdev.mlx_features, false);
+ return ndev->mvdev.mlx_features;
+}
+
+static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+{
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
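+/* Create all data virtqueue objects; on failure, tear down the ones that
+ * were already set up.
+ */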
+static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto err_vq;
+ }
+
+ return 0;
+
+err_vq:
+ for (--i; i >= 0; i--)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ return err;
+}
+
+static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ mvq = &ndev->vqs[i];
+ if (!mvq->initialized)
+ continue;
+
+ teardown_vq(ndev, mvq);
+ }
+}
+
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+ return virtio_legacy_is_little_endian() ||
+ (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err;
+
+ print_features(mvdev, features, true);
+
+ err = verify_min_features(mvdev, features);
+ if (err)
+ return err;
+
+ ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
+ ndev->mtu);
+ return err;
+}
+
+static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
+{
+ /* not implemented */
+ mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
+}
+
+#define MLX5_VDPA_MAX_VQ_ENTRIES 256
+static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
+{
+ return MLX5_VDPA_MAX_VQ_ENTRIES;
+}
+
+static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
+{
+ return VIRTIO_ID_NET;
+}
+
+static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
+{
+ return PCI_VENDOR_ID_MELLANOX;
+}
+
+static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ print_status(mvdev, ndev->mvdev.status, false);
+ return ndev->mvdev.status;
+}
+
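+/* Snapshot the state needed to recreate a virtqueue (available index, ring
+ * addresses, size and callback) before the driver objects are torn down.
+ */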
+static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_vq_restore_info *ri = &mvq->ri;
+ struct mlx5_virtq_attr attr;
+ int err;
+
+ if (!mvq->initialized)
+ return 0;
+
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err)
+ return err;
+
+ ri->avail_index = attr.available_index;
+ ri->ready = mvq->ready;
+ ri->num_ent = mvq->num_ent;
+ ri->desc_addr = mvq->desc_addr;
+ ri->device_addr = mvq->device_addr;
+ ri->driver_addr = mvq->driver_addr;
+ ri->cb = mvq->event_cb;
+ ri->restore = true;
+ return 0;
+}
+
+static int save_channels_info(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+ memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
+ save_channel_info(ndev, &ndev->vqs[i]);
+ }
+ return 0;
+}
+
+static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
+ memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+}
+
+static void restore_channels_info(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_vq_restore_info *ri;
+ int i;
+
+ mlx5_clear_vqs(ndev);
+ init_mvqs(ndev);
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ ri = &mvq->ri;
+ if (!ri->restore)
+ continue;
+
+ mvq->avail_idx = ri->avail_index;
+ mvq->ready = ri->ready;
+ mvq->num_ent = ri->num_ent;
+ mvq->desc_addr = ri->desc_addr;
+ mvq->device_addr = ri->device_addr;
+ mvq->driver_addr = ri->driver_addr;
+ mvq->event_cb = ri->cb;
+ }
+}
+
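+/* Handle a change of the memory map: suspend and save the virtqueues, tear
+ * down the driver resources, rebuild the memory key for the new IOTLB and
+ * then restore the saved virtqueue state.
+ */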
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ suspend_vqs(ndev);
+ err = save_channels_info(ndev);
+ if (err)
+ goto err_mr;
+
+ teardown_driver(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+ if (err)
+ goto err_mr;
+
+ restore_channels_info(ndev);
+ err = setup_driver(ndev);
+ if (err)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+err_mr:
+ return err;
+}
+
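+/* Create the datapath objects in order: virtqueues, RQT, TIR and the
+ * steering rule forwarding traffic to the TIR. Serialized by reslock.
+ */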
+static int setup_driver(struct mlx5_vdpa_net *ndev)
+{
+ int err;
+
+ mutex_lock(&ndev->reslock);
+ if (ndev->setup) {
+ mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
+ err = 0;
+ goto out;
+ }
+ err = setup_virtqueues(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+ goto out;
+ }
+
+ err = create_rqt(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+ goto err_rqt;
+ }
+
+ err = create_tir(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+ goto err_tir;
+ }
+
+ err = add_fwd_to_tir(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+ goto err_fwd;
+ }
+ ndev->setup = true;
+ mutex_unlock(&ndev->reslock);
+
+ return 0;
+
+err_fwd:
+ destroy_tir(ndev);
+err_tir:
+ destroy_rqt(ndev);
+err_rqt:
+ teardown_virtqueues(ndev);
+out:
+ mutex_unlock(&ndev->reslock);
+ return err;
+}
+
+static void teardown_driver(struct mlx5_vdpa_net *ndev)
+{
+ mutex_lock(&ndev->reslock);
+ if (!ndev->setup)
+ goto out;
+
+ remove_fwd_to_tir(ndev);
+ destroy_tir(ndev);
+ destroy_rqt(ndev);
+ teardown_virtqueues(ndev);
+ ndev->setup = false;
+out:
+ mutex_unlock(&ndev->reslock);
+}
+
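+/* A zero status means device reset: tear down the driver resources and
+ * destroy the memory key. Setting DRIVER_OK triggers datapath setup.
+ */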
+static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err;
+
+ print_status(mvdev, status, true);
+ if (!status) {
+ mlx5_vdpa_info(mvdev, "performing device reset\n");
+ teardown_driver(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
+ ndev->mvdev.mlx_features = 0;
+ ++mvdev->generation;
+ return;
+ }
+
+ if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ err = setup_driver(ndev);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
+ goto err_setup;
+ }
+ } else {
+ mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
+ return;
+ }
+ }
+
+ ndev->mvdev.status = status;
+ return;
+
+err_setup:
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+}
+
+static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
+ unsigned int len)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ if (offset + len <= sizeof(struct virtio_net_config))
+ memcpy(buf, (u8 *)&ndev->config + offset, len);
+}
+
+static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ /* not supported */
+}
+
+static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ return mvdev->generation;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ bool change_map;
+ int err;
+
+ err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
+ return err;
+ }
+
+ if (change_map)
+ return mlx5_vdpa_change_map(ndev, iotlb);
+
+ return 0;
+}
+
+static void mlx5_vdpa_free(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev;
+
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ free_resources(ndev);
+ mlx5_vdpa_free_resources(&ndev->mvdev);
+ mutex_destroy(&ndev->reslock);
+}
+
+static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
+{
+ struct vdpa_notification_area ret = {};
+
+ return ret;
+}
+
+static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct vdpa_config_ops mlx5_vdpa_ops = {
+ .set_vq_address = mlx5_vdpa_set_vq_address,
+ .set_vq_num = mlx5_vdpa_set_vq_num,
+ .kick_vq = mlx5_vdpa_kick_vq,
+ .set_vq_cb = mlx5_vdpa_set_vq_cb,
+ .set_vq_ready = mlx5_vdpa_set_vq_ready,
+ .get_vq_ready = mlx5_vdpa_get_vq_ready,
+ .set_vq_state = mlx5_vdpa_set_vq_state,
+ .get_vq_state = mlx5_vdpa_get_vq_state,
+ .get_vq_notification = mlx5_get_vq_notification,
+ .get_vq_irq = mlx5_get_vq_irq,
+ .get_vq_align = mlx5_vdpa_get_vq_align,
+ .get_features = mlx5_vdpa_get_features,
+ .set_features = mlx5_vdpa_set_features,
+ .set_config_cb = mlx5_vdpa_set_config_cb,
+ .get_vq_num_max = mlx5_vdpa_get_vq_num_max,
+ .get_device_id = mlx5_vdpa_get_device_id,
+ .get_vendor_id = mlx5_vdpa_get_vendor_id,
+ .get_status = mlx5_vdpa_get_status,
+ .set_status = mlx5_vdpa_set_status,
+ .get_config = mlx5_vdpa_get_config,
+ .set_config = mlx5_vdpa_set_config,
+ .get_generation = mlx5_vdpa_get_generation,
+ .set_map = mlx5_vdpa_set_map,
+ .free = mlx5_vdpa_free,
+};
+
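+/* Allocate per-device resources (transport domain and TIS) once. */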
+static int alloc_resources(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_net_resources *res = &ndev->res;
+ int err;
+
+ if (res->valid) {
+ mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
+ return -EEXIST;
+ }
+
+ err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
+ if (err)
+ return err;
+
+ err = create_tis(ndev);
+ if (err)
+ goto err_tis;
+
+ res->valid = true;
+
+ return 0;
+
+err_tis:
+ mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+ return err;
+}
+
+static void free_resources(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_net_resources *res = &ndev->res;
+
+ if (!res->valid)
+ return;
+
+ destroy_tis(ndev);
+ mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+ res->valid = false;
+}
+
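+/* Initialize virtqueue bookkeeping. The first 2 * mlx5_vdpa_max_qps() entries
+ * are the data virtqueues; their companion QP is flagged as firmware created.
+ */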
+static void init_mvqs(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+ mvq->ndev = ndev;
+ mvq->fwqp.fw = true;
+ }
+ for (; i < ndev->mvdev.max_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+ mvq->ndev = ndev;
+ }
+}
+
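+/* Probe entry point: query device capabilities, allocate the vdpa device
+ * with two virtqueues per queue pair, set up resources and register with
+ * the vdpa bus.
+ */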
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
+{
+ struct virtio_net_config *config;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ u32 max_vqs;
+ int err;
+
+ /* We save one virtqueue for the control virtqueue, should we require it */
+ max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+ max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+
+ ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
+ 2 * mlx5_vdpa_max_qps(max_vqs));
+ if (IS_ERR(ndev))
+ return ndev;
+
+ ndev->mvdev.max_vqs = max_vqs;
+ mvdev = &ndev->mvdev;
+ mvdev->mdev = mdev;
+ init_mvqs(ndev);
+ mutex_init(&ndev->reslock);
+ config = &ndev->config;
+ err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+ if (err)
+ goto err_mtu;
+
+ err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
+ if (err)
+ goto err_mtu;
+
+ mvdev->vdev.dma_dev = mdev->device;
+ err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ if (err)
+ goto err_mtu;
+
+ err = alloc_resources(ndev);
+ if (err)
+ goto err_res;
+
+ err = vdpa_register_device(&mvdev->vdev);
+ if (err)
+ goto err_reg;
+
+ return ndev;
+
+err_reg:
+ free_resources(ndev);
+err_res:
+ mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mtu:
+ mutex_destroy(&ndev->reslock);
+ put_device(&mvdev->vdev.dev);
+ return ERR_PTR(err);
+}
+
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
+{
+ vdpa_unregister_device(&mvdev->vdev);
+}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
new file mode 100644
index 000000000000..f2d6d68b020e
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VNET_H_
+#define __MLX5_VNET_H_
+
+#include <linux/vdpa.h>
+#include <linux/virtio_net.h>
+#include <linux/vringh.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+static inline u32 mlx5_vdpa_max_qps(int max_vqs)
+{
+ return max_vqs / 2;
+}
+
+#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev);
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev);
+
+#endif /* __MLX5_VNET_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index de211ef3738c..a69ffc991e13 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @nvqs: number of virtqueues supported by this device
* @size: size of the parent structure that contains private data
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ int nvqs,
size_t size)
{
struct vdpa_device *vdev;
@@ -96,6 +98,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->dev.release = vdpa_release_dev;
vdev->index = err;
vdev->config = config;
+ vdev->features_valid = false;
+ vdev->nvqs = nvqs;
err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c7334cc65bb2..62d640327145 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -24,6 +24,7 @@
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
+#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>
@@ -33,6 +34,10 @@
#define DRV_DESC "vDPA Device Simulator"
#define DRV_LICENSE "GPL v2"
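+
+/* When set, advertise the batched set_map() interface instead of the
+ * per-page dma_map()/dma_unmap() callbacks.
+ */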
+static int batch_mapping = 1;
+module_param(batch_mapping, int, 0444);
+MODULE_PARM_DESC(batch_mapping, "Batched mapping. 1 - Enable; 0 - Disable");
+
struct vdpasim_virtqueue {
struct vringh vring;
struct vringh_kiov iov;
@@ -55,12 +60,12 @@ struct vdpasim_virtqueue {
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
(1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM);
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM);
/* State of each vdpasim device */
struct vdpasim {
struct vdpa_device vdpa;
- struct vdpasim_virtqueue vqs[2];
+ struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
struct work_struct work;
/* spinlock to synchronize virtqueue state */
spinlock_t lock;
@@ -70,8 +75,27 @@ struct vdpasim {
u32 status;
u32 generation;
u64 features;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
};
+/* TODO: cross-endian support */
+static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
+{
+ return virtio_legacy_is_little_endian() ||
+ (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
+{
+ return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
+{
+ return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
+}
+
static struct vdpasim *vdpasim_dev;
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
@@ -118,7 +142,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
for (i = 0; i < VDPASIM_VQ_NUM; i++)
vdpasim_vq_reset(&vdpasim->vqs[i]);
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
+ spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
vdpasim->status = 0;
@@ -236,8 +262,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
/* For simplicity, use identical mapping to avoid e.g iova
* allocator.
*/
+ spin_lock(&vdpasim->iommu_lock);
ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
pa, dir_to_perm(dir));
+ spin_unlock(&vdpasim->iommu_lock);
if (ret)
return DMA_MAPPING_ERROR;
@@ -251,8 +279,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -264,9 +294,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
void *addr = kmalloc(size, flag);
int ret;
- if (!addr)
+ spin_lock(&vdpasim->iommu_lock);
+ if (!addr) {
*dma_addr = DMA_MAPPING_ERROR;
- else {
+ } else {
u64 pa = virt_to_phys(addr);
ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -279,6 +310,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
} else
*dma_addr = (dma_addr_t)pa;
}
+ spin_unlock(&vdpasim->iommu_lock);
return addr;
}
@@ -290,8 +322,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
+
kfree(phys_to_virt((uintptr_t)dma_addr));
}
@@ -303,21 +338,27 @@ static const struct dma_map_ops vdpasim_dma_ops = {
};
static const struct vdpa_config_ops vdpasim_net_config_ops;
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
static struct vdpasim *vdpasim_create(void)
{
- struct virtio_net_config *config;
+ const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
int ret = -ENOMEM;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
- &vdpasim_net_config_ops);
+ if (batch_mapping)
+ ops = &vdpasim_net_batch_config_ops;
+ else
+ ops = &vdpasim_net_config_ops;
+
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
if (!vdpasim)
goto err_alloc;
INIT_WORK(&vdpasim->work, vdpasim_work);
spin_lock_init(&vdpasim->lock);
+ spin_lock_init(&vdpasim->iommu_lock);
dev = &vdpasim->vdpa.dev;
dev->coherent_dma_mask = DMA_BIT_MASK(64);
@@ -331,10 +372,7 @@ static struct vdpasim *vdpasim_create(void)
if (!vdpasim->buffer)
goto err_iommu;
- config = &vdpasim->config;
- config->mtu = 1500;
- config->status = VIRTIO_NET_S_LINK_UP;
- eth_random_addr(config->mac);
+ eth_random_addr(vdpasim->config.mac);
vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
@@ -413,26 +451,29 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
return vq->ready;
}
-static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
+static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+ const struct vdpa_vq_state *state)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
spin_lock(&vdpasim->lock);
- vrh->last_avail_idx = state;
+ vrh->last_avail_idx = state->avail_index;
spin_unlock(&vdpasim->lock);
return 0;
}
-static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
+static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_vq_state *state)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
- return vrh->last_avail_idx;
+ state->avail_index = vrh->last_avail_idx;
+ return 0;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
@@ -448,13 +489,22 @@ static u64 vdpasim_get_features(struct vdpa_device *vdpa)
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct virtio_net_config *config = &vdpasim->config;
/* DMA mapping must be done by driver */
- if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
return -EINVAL;
vdpasim->features = features & vdpasim_features;
+ /* We generally only know whether the guest is using the legacy interface
+ * here, so this is the earliest point at which we can set config fields.
+ * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
+ * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
+ */
+
+ config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
+ config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
return 0;
}
@@ -508,7 +558,7 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
if (offset + len < sizeof(struct virtio_net_config))
- memcpy(buf, &vdpasim->config + offset, len);
+ memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
@@ -532,6 +582,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
u64 start = 0ULL, last = 0ULL - 1;
int ret;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -541,10 +592,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
if (ret)
goto err;
}
+ spin_unlock(&vdpasim->iommu_lock);
return 0;
err:
vhost_iotlb_reset(vdpasim->iommu);
+ spin_unlock(&vdpasim->iommu_lock);
return ret;
}
@@ -552,16 +605,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
u64 pa, u32 perm)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ int ret;
+
+ spin_lock(&vdpasim->iommu_lock);
+ ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
+ perm);
+ spin_unlock(&vdpasim->iommu_lock);
- return vhost_iotlb_add_range(vdpasim->iommu, iova,
- iova + size - 1, pa, perm);
+ return ret;
}
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
return 0;
}
@@ -597,12 +657,36 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
- .set_map = vdpasim_set_map,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
};
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+ .set_vq_address = vdpasim_set_vq_address,
+ .set_vq_num = vdpasim_set_vq_num,
+ .kick_vq = vdpasim_kick_vq,
+ .set_vq_cb = vdpasim_set_vq_cb,
+ .set_vq_ready = vdpasim_set_vq_ready,
+ .get_vq_ready = vdpasim_get_vq_ready,
+ .set_vq_state = vdpasim_set_vq_state,
+ .get_vq_state = vdpasim_get_vq_state,
+ .get_vq_align = vdpasim_get_vq_align,
+ .get_features = vdpasim_get_features,
+ .set_features = vdpasim_set_features,
+ .set_config_cb = vdpasim_set_config_cb,
+ .get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_device_id = vdpasim_get_device_id,
+ .get_vendor_id = vdpasim_get_vendor_id,
+ .get_status = vdpasim_get_status,
+ .set_status = vdpasim_set_status,
+ .get_config = vdpasim_get_config,
+ .set_config = vdpasim_set_config,
+ .get_generation = vdpasim_get_generation,
+ .set_map = vdpasim_set_map,
+ .free = vdpasim_free,
+};
+
static int __init vdpasim_dev_init(void)
{
vdpasim_dev = vdpasim_create();
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index de881a6cff35..1ab1f5cda4ac 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -60,6 +60,10 @@ module_param(enable_sriov, bool, 0644);
MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver; enabling VFs without such support may result in non-functional VFs or PF.");
#endif
+static bool disable_denylist;
+module_param(disable_denylist, bool, 0444);
+MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -69,6 +73,44 @@ static inline bool vfio_vga_disabled(void)
#endif
}
+static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+{
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
+{
+ if (!vfio_pci_dev_in_denylist(pdev))
+ return false;
+
+ if (disable_denylist) {
+ pci_warn(pdev,
+ "device denylist disabled - allowing device %04x:%04x.\n",
+ pdev->vendor, pdev->device);
+ return false;
+ }
+
+ pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
+ pdev->vendor, pdev->device);
+
+ return true;
+}
+
/*
* Our VGA arbiter participation is limited since we don't know anything
* about the device itself. However, if the device is the only VGA device
@@ -207,6 +249,8 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
case 0x1580 ... 0x1581:
case 0x1583 ... 0x158b:
case 0x37d0 ... 0x37d2:
+ /* X550 */
+ case 0x1563:
return true;
default:
return false;
@@ -521,14 +565,12 @@ static void vfio_pci_release(void *device_data)
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
+
mutex_lock(&vdev->igate);
if (vdev->err_trigger) {
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = NULL;
}
- mutex_unlock(&vdev->igate);
-
- mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
eventfd_ctx_put(vdev->req_trigger);
vdev->req_trigger = NULL;
@@ -948,7 +990,7 @@ static long vfio_pci_ioctl(void *device_data,
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -1856,6 +1898,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct iommu_group *group;
int ret;
+ if (vfio_pci_is_denylisted(pdev))
+ return -EINVAL;
+
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
@@ -2345,6 +2390,9 @@ static int __init vfio_pci_init(void)
vfio_pci_fill_ids();
+ if (disable_denylist)
+ pr_warn("device denylist disabled.\n");
+
return 0;
out_driver:
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 86a02aff8735..61ca8ab165dc 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -33,12 +33,14 @@
struct vfio_pci_ioeventfd {
struct list_head next;
+ struct vfio_pci_device *vdev;
struct virqfd *virqfd;
void __iomem *addr;
uint64_t data;
loff_t pos;
int bar;
int count;
+ bool test_mem;
};
struct vfio_pci_irq_ctx {
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 916b184df3a5..9e353c484ace 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -37,17 +37,70 @@
#define vfio_ioread8 ioread8
#define vfio_iowrite8 iowrite8
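+
+/*
+ * Generate vfio_pci_iowrite##size() and vfio_pci_ioread##size() helpers.
+ * When test_mem is set they take memory_lock and return -EIO if the
+ * device's memory space is disabled, so MMIO is never touched while the
+ * memory enable bit is clear.
+ */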
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ vfio_iowrite##size(val, io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOWRITE(8)
+VFIO_IOWRITE(16)
+VFIO_IOWRITE(32)
+#ifdef iowrite64
+VFIO_IOWRITE(64)
+#endif
+
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ *val = vfio_ioread##size(io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOREAD(8)
+VFIO_IOREAD(16)
+VFIO_IOREAD(32)
+
/*
* Read or write from an __iomem region (MMIO or I/O port) with an excluded
* range which is inaccessible. The excluded range drops writes and fills
* reads with -1. This is intended for handling MSI-X vector tables and
* leftover space for ROM BARs.
*/
-static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
size_t x_end, bool iswrite)
{
ssize_t done = 0;
+ int ret;
while (count) {
size_t fillable, filled;
@@ -66,9 +119,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- vfio_iowrite32(val, io + off);
+ ret = vfio_pci_iowrite32(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread32(io + off);
+ ret = vfio_pci_ioread32(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 4))
return -EFAULT;
@@ -82,9 +141,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- vfio_iowrite16(val, io + off);
+ ret = vfio_pci_iowrite16(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread16(io + off);
+ ret = vfio_pci_ioread16(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 2))
return -EFAULT;
@@ -98,9 +163,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- vfio_iowrite8(val, io + off);
+ ret = vfio_pci_iowrite8(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread8(io + off);
+ ret = vfio_pci_ioread8(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 1))
return -EFAULT;
@@ -178,14 +249,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
count = min(count, (size_t)(end - pos));
- if (res->flags & IORESOURCE_MEM) {
- down_read(&vdev->memory_lock);
- if (!__vfio_pci_memory_enabled(vdev)) {
- up_read(&vdev->memory_lock);
- return -EIO;
- }
- }
-
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
@@ -213,7 +276,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
x_end = vdev->msix_offset + vdev->msix_size;
}
- done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
+ done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+ count, x_start, x_end, iswrite);
if (done >= 0)
*ppos += done;
@@ -221,9 +285,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
out:
- if (res->flags & IORESOURCE_MEM)
- up_read(&vdev->memory_lock);
-
return done;
}
@@ -278,7 +339,12 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
return ret;
}
- done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
+ /*
+ * VGA MMIO is a legacy, non-BAR resource that hopefully allows
+ * probing, so we don't currently worry about access in relation
+ * to the memory enable bit in the command register.
+ */
+ done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
vga_put(vdev->pdev, rsrc);
@@ -296,17 +362,21 @@ static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
switch (ioeventfd->count) {
case 1:
- vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 2:
- vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 4:
- vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
- vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#endif
}
@@ -378,11 +448,13 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
goto out_unlock;
}
+ ioeventfd->vdev = vdev;
ioeventfd->addr = vdev->barmap[bar] + pos;
ioeventfd->data = data;
ioeventfd->pos = pos;
ioeventfd->bar = bar;
ioeventfd->count = count;
+ ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
NULL, NULL, &ioeventfd->virqfd, fd);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 580099afeaff..262ab0efd06c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -627,9 +627,10 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
* that error notification via MSI can be affected for platforms that handle
* MSI within the same IOVA space as DMA.
*/
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_allowed[] = { "pci-stub" };
-static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
+static bool vfio_dev_driver_allowed(struct device *dev,
+ struct device_driver *drv)
{
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -638,8 +639,8 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
return true;
}
- return match_string(vfio_driver_whitelist,
- ARRAY_SIZE(vfio_driver_whitelist),
+ return match_string(vfio_driver_allowed,
+ ARRAY_SIZE(vfio_driver_allowed),
drv->name) >= 0;
}
@@ -648,7 +649,7 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
* one of the following states:
* - driver-less
* - bound to a vfio driver
- * - bound to a whitelisted driver
+ * - bound to an otherwise allowed driver
* - a PCI interconnect device
*
* We use two methods to determine whether a device is bound to a vfio
@@ -674,7 +675,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
}
mutex_unlock(&group->unbound_lock);
- if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
+ if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
return 0;
device = vfio_group_get_device(group, dev);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 16b3adc508db..fe888b5dcc00 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -383,7 +383,7 @@ static void tce_iommu_unuse_page(struct tce_container *container,
struct page *page;
page = pfn_to_page(hpa >> PAGE_SHIFT);
- put_page(page);
+ unpin_user_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
@@ -486,7 +486,7 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
struct page *page = NULL;
enum dma_data_direction direction = iommu_tce_direction(tce);
- if (get_user_pages_fast(tce & PAGE_MASK, 1,
+ if (pin_user_pages_fast(tce & PAGE_MASK, 1,
direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
&page) != 1)
return -EFAULT;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5e556ac9102a..5fbf0c1f7433 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
if (ret) {
bool unlocked = false;
- ret = fixup_user_fault(NULL, mm, vaddr,
+ ret = fixup_user_fault(mm, vaddr,
FAULT_FLAG_REMOTE |
(write_fault ? FAULT_FLAG_WRITE : 0),
&unlocked);
@@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
flags |= FOLL_WRITE;
mmap_read_lock(mm);
- ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+ ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
page, NULL, NULL);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
@@ -1225,8 +1225,10 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
return 0;
unwind:
- list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+ list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
+ cond_resched();
+ }
return ret;
}
@@ -1422,13 +1424,16 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
- struct vfio_domain *d;
+ struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret;
/* Arbitrarily pick the first domain in the list for lookups */
- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ if (!list_empty(&iommu->domain_list))
+ d = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
+
n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) {
@@ -1446,6 +1451,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
phys_addr_t p;
dma_addr_t i;
+ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+ ret = -EINVAL;
+ goto unwind;
+ }
+
phys = iommu_iova_to_phys(d->domain, iova);
if (WARN_ON(!phys)) {
@@ -1475,7 +1485,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
- return ret;
+ goto unwind;
}
phys = pfn << PAGE_SHIFT;
@@ -1484,14 +1494,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
- if (ret)
- return ret;
+ if (ret) {
+ if (!dma->iommu_mapped)
+ vfio_unpin_pages_remote(dma, iova,
+ phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT,
+ true);
+ goto unwind;
+ }
iova += size;
}
+ }
+
+ /* All dmas are now mapped, defer to second tree walk for unwind */
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
dma->iommu_mapped = true;
}
+
return 0;
+
+unwind:
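+ /*
+ * Walk back over what was mapped into the new domain: ranges that were
+ * already iommu mapped via another domain only need an unmap here, the
+ * rest are unmapped and unpinned in physically contiguous chunks.
+ */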
+ for (; n; n = rb_prev(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+ dma_addr_t iova;
+
+ if (dma->iommu_mapped) {
+ iommu_unmap(domain->domain, dma->iova, dma->size);
+ continue;
+ }
+
+ iova = dma->iova;
+ while (iova < dma->iova + dma->size) {
+ phys_addr_t phys, p;
+ size_t size;
+ dma_addr_t i;
+
+ phys = iommu_iova_to_phys(domain->domain, iova);
+ if (!phys) {
+ iova += PAGE_SIZE;
+ continue;
+ }
+
+ size = PAGE_SIZE;
+ p = phys + size;
+ i = iova + size;
+ while (i < dma->iova + dma->size &&
+ p == iommu_iova_to_phys(domain->domain, i)) {
+ size += PAGE_SIZE;
+ p += PAGE_SIZE;
+ i += PAGE_SIZE;
+ }
+
+ iommu_unmap(domain->domain, iova, size);
+ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, true);
+ }
+ }
+
+ return ret;
}
/*
@@ -2376,7 +2439,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
break;
case VFIO_TYPE1_NESTING_IOMMU:
iommu->nesting = true;
- /* fall through */
+ fallthrough;
case VFIO_TYPE1v2_IOMMU:
iommu->v2 = true;
break;
@@ -2453,6 +2516,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
return ret;
}
+static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+ unsigned long arg)
+{
+ switch (arg) {
+ case VFIO_TYPE1_IOMMU:
+ case VFIO_TYPE1v2_IOMMU:
+ case VFIO_TYPE1_NESTING_IOMMU:
+ return 1;
+ case VFIO_DMA_CC_IOMMU:
+ if (!iommu)
+ return 0;
+ return vfio_domains_have_iommu_cache(iommu);
+ default:
+ return 0;
+ }
+}
+
static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
size_t size)
@@ -2529,241 +2609,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}
-static long vfio_iommu_type1_ioctl(void *iommu_data,
- unsigned int cmd, unsigned long arg)
+static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
+ unsigned long arg)
{
- struct vfio_iommu *iommu = iommu_data;
+ struct vfio_iommu_type1_info info;
unsigned long minsz;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
+ int ret;
- if (cmd == VFIO_CHECK_EXTENSION) {
- switch (arg) {
- case VFIO_TYPE1_IOMMU:
- case VFIO_TYPE1v2_IOMMU:
- case VFIO_TYPE1_NESTING_IOMMU:
- return 1;
- case VFIO_DMA_CC_IOMMU:
- if (!iommu)
- return 0;
- return vfio_domains_have_iommu_cache(iommu);
- default:
- return 0;
- }
- } else if (cmd == VFIO_IOMMU_GET_INFO) {
- struct vfio_iommu_type1_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
- int ret;
-
- minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+ minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz)
- return -EINVAL;
+ if (info.argsz < minsz)
+ return -EINVAL;
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0; /* output, no-recopy necessary */
- }
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0; /* output, no-recopy necessary */
+ }
- mutex_lock(&iommu->lock);
- info.flags = VFIO_IOMMU_INFO_PGSIZES;
+ mutex_lock(&iommu->lock);
+ info.flags = VFIO_IOMMU_INFO_PGSIZES;
- info.iova_pgsizes = iommu->pgsize_bitmap;
+ info.iova_pgsizes = iommu->pgsize_bitmap;
- ret = vfio_iommu_migration_build_caps(iommu, &caps);
+ ret = vfio_iommu_migration_build_caps(iommu, &caps);
- if (!ret)
- ret = vfio_iommu_iova_build_caps(iommu, &caps);
+ if (!ret)
+ ret = vfio_iommu_iova_build_caps(iommu, &caps);
- mutex_unlock(&iommu->lock);
+ mutex_unlock(&iommu->lock);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
- if (caps.size) {
- info.flags |= VFIO_IOMMU_INFO_CAPS;
+ if (caps.size) {
+ info.flags |= VFIO_IOMMU_INFO_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
}
-
- kfree(caps.buf);
+ info.cap_offset = sizeof(info);
}
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ kfree(caps.buf);
+ }
- } else if (cmd == VFIO_IOMMU_MAP_DMA) {
- struct vfio_iommu_type1_dma_map map;
- uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
- VFIO_DMA_MAP_FLAG_WRITE;
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+}
- minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
+ unsigned long arg)
+{
+ struct vfio_iommu_type1_dma_map map;
+ unsigned long minsz;
+ uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
- if (copy_from_user(&map, (void __user *)arg, minsz))
- return -EFAULT;
+ minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
- if (map.argsz < minsz || map.flags & ~mask)
- return -EINVAL;
+ if (copy_from_user(&map, (void __user *)arg, minsz))
+ return -EFAULT;
- return vfio_dma_do_map(iommu, &map);
+ if (map.argsz < minsz || map.flags & ~mask)
+ return -EINVAL;
- } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
- struct vfio_iommu_type1_dma_unmap unmap;
- struct vfio_bitmap bitmap = { 0 };
- int ret;
+ return vfio_dma_do_map(iommu, &map);
+}
- minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
+static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
+ unsigned long arg)
+{
+ struct vfio_iommu_type1_dma_unmap unmap;
+ struct vfio_bitmap bitmap = { 0 };
+ unsigned long minsz;
+ int ret;
- if (copy_from_user(&unmap, (void __user *)arg, minsz))
- return -EFAULT;
+ minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
- if (unmap.argsz < minsz ||
- unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
- return -EINVAL;
+ if (copy_from_user(&unmap, (void __user *)arg, minsz))
+ return -EFAULT;
- if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
- unsigned long pgshift;
+ if (unmap.argsz < minsz ||
+ unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
+ return -EINVAL;
- if (unmap.argsz < (minsz + sizeof(bitmap)))
- return -EINVAL;
+ if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+ unsigned long pgshift;
- if (copy_from_user(&bitmap,
- (void __user *)(arg + minsz),
- sizeof(bitmap)))
- return -EFAULT;
+ if (unmap.argsz < (minsz + sizeof(bitmap)))
+ return -EINVAL;
- if (!access_ok((void __user *)bitmap.data, bitmap.size))
- return -EINVAL;
+ if (copy_from_user(&bitmap,
+ (void __user *)(arg + minsz),
+ sizeof(bitmap)))
+ return -EFAULT;
- pgshift = __ffs(bitmap.pgsize);
- ret = verify_bitmap_size(unmap.size >> pgshift,
- bitmap.size);
- if (ret)
- return ret;
- }
+ if (!access_ok((void __user *)bitmap.data, bitmap.size))
+ return -EINVAL;
- ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+ pgshift = __ffs(bitmap.pgsize);
+ ret = verify_bitmap_size(unmap.size >> pgshift,
+ bitmap.size);
if (ret)
return ret;
+ }
- return copy_to_user((void __user *)arg, &unmap, minsz) ?
+ ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+ if (ret)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &unmap, minsz) ?
-EFAULT : 0;
- } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
- struct vfio_iommu_type1_dirty_bitmap dirty;
- uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
- VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
- VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
- int ret = 0;
+}
- if (!iommu->v2)
- return -EACCES;
+static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
+ unsigned long arg)
+{
+ struct vfio_iommu_type1_dirty_bitmap dirty;
+ uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+ unsigned long minsz;
+ int ret = 0;
- minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
- flags);
+ if (!iommu->v2)
+ return -EACCES;
- if (copy_from_user(&dirty, (void __user *)arg, minsz))
- return -EFAULT;
+ minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
- if (dirty.argsz < minsz || dirty.flags & ~mask)
- return -EINVAL;
+ if (copy_from_user(&dirty, (void __user *)arg, minsz))
+ return -EFAULT;
- /* only one flag should be set at a time */
- if (__ffs(dirty.flags) != __fls(dirty.flags))
- return -EINVAL;
+ if (dirty.argsz < minsz || dirty.flags & ~mask)
+ return -EINVAL;
- if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
- size_t pgsize;
+ /* only one flag should be set at a time */
+ if (__ffs(dirty.flags) != __fls(dirty.flags))
+ return -EINVAL;
- mutex_lock(&iommu->lock);
- pgsize = 1 << __ffs(iommu->pgsize_bitmap);
- if (!iommu->dirty_page_tracking) {
- ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
- if (!ret)
- iommu->dirty_page_tracking = true;
- }
- mutex_unlock(&iommu->lock);
- return ret;
- } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
- mutex_lock(&iommu->lock);
- if (iommu->dirty_page_tracking) {
- iommu->dirty_page_tracking = false;
- vfio_dma_bitmap_free_all(iommu);
- }
- mutex_unlock(&iommu->lock);
- return 0;
- } else if (dirty.flags &
- VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
- struct vfio_iommu_type1_dirty_bitmap_get range;
- unsigned long pgshift;
- size_t data_size = dirty.argsz - minsz;
- size_t iommu_pgsize;
-
- if (!data_size || data_size < sizeof(range))
- return -EINVAL;
-
- if (copy_from_user(&range, (void __user *)(arg + minsz),
- sizeof(range)))
- return -EFAULT;
+ if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+ size_t pgsize;
- if (range.iova + range.size < range.iova)
- return -EINVAL;
- if (!access_ok((void __user *)range.bitmap.data,
- range.bitmap.size))
- return -EINVAL;
+ mutex_lock(&iommu->lock);
+ pgsize = 1 << __ffs(iommu->pgsize_bitmap);
+ if (!iommu->dirty_page_tracking) {
+ ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+ if (!ret)
+ iommu->dirty_page_tracking = true;
+ }
+ mutex_unlock(&iommu->lock);
+ return ret;
+ } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+ mutex_lock(&iommu->lock);
+ if (iommu->dirty_page_tracking) {
+ iommu->dirty_page_tracking = false;
+ vfio_dma_bitmap_free_all(iommu);
+ }
+ mutex_unlock(&iommu->lock);
+ return 0;
+ } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+ struct vfio_iommu_type1_dirty_bitmap_get range;
+ unsigned long pgshift;
+ size_t data_size = dirty.argsz - minsz;
+ size_t iommu_pgsize;
- pgshift = __ffs(range.bitmap.pgsize);
- ret = verify_bitmap_size(range.size >> pgshift,
- range.bitmap.size);
- if (ret)
- return ret;
+ if (!data_size || data_size < sizeof(range))
+ return -EINVAL;
- mutex_lock(&iommu->lock);
+ if (copy_from_user(&range, (void __user *)(arg + minsz),
+ sizeof(range)))
+ return -EFAULT;
+
+ if (range.iova + range.size < range.iova)
+ return -EINVAL;
+ if (!access_ok((void __user *)range.bitmap.data,
+ range.bitmap.size))
+ return -EINVAL;
- iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+ pgshift = __ffs(range.bitmap.pgsize);
+ ret = verify_bitmap_size(range.size >> pgshift,
+ range.bitmap.size);
+ if (ret)
+ return ret;
- /* allow only smallest supported pgsize */
- if (range.bitmap.pgsize != iommu_pgsize) {
- ret = -EINVAL;
- goto out_unlock;
- }
- if (range.iova & (iommu_pgsize - 1)) {
- ret = -EINVAL;
- goto out_unlock;
- }
- if (!range.size || range.size & (iommu_pgsize - 1)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ mutex_lock(&iommu->lock);
- if (iommu->dirty_page_tracking)
- ret = vfio_iova_dirty_bitmap(range.bitmap.data,
- iommu, range.iova, range.size,
- range.bitmap.pgsize);
- else
- ret = -EINVAL;
-out_unlock:
- mutex_unlock(&iommu->lock);
+ iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
- return ret;
+ /* allow only smallest supported pgsize */
+ if (range.bitmap.pgsize != iommu_pgsize) {
+ ret = -EINVAL;
+ goto out_unlock;
}
+ if (range.iova & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!range.size || range.size & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (iommu->dirty_page_tracking)
+ ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+ iommu, range.iova,
+ range.size,
+ range.bitmap.pgsize);
+ else
+ ret = -EINVAL;
+out_unlock:
+ mutex_unlock(&iommu->lock);
+
+ return ret;
}
- return -ENOTTY;
+ return -EINVAL;
+}
+
+static long vfio_iommu_type1_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_iommu *iommu = iommu_data;
+
+ switch (cmd) {
+ case VFIO_CHECK_EXTENSION:
+ return vfio_iommu_type1_check_extension(iommu, arg);
+ case VFIO_IOMMU_GET_INFO:
+ return vfio_iommu_type1_get_info(iommu, arg);
+ case VFIO_IOMMU_MAP_DMA:
+ return vfio_iommu_type1_map_dma(iommu, arg);
+ case VFIO_IOMMU_UNMAP_DMA:
+ return vfio_iommu_type1_unmap_dma(iommu, arg);
+ case VFIO_IOMMU_DIRTY_PAGES:
+ return vfio_iommu_type1_dirty_pages(iommu, arg);
+ default:
+ return -ENOTTY;
+ }
}
static int vfio_iommu_type1_register_notifier(void *iommu_data,
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index d3688c6afb87..587fbae06182 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -65,6 +65,7 @@ config VHOST_VDPA
tristate "Vhost driver for vDPA-based backend"
depends on EVENTFD
select VHOST
+ select IRQ_BYPASS_MANAGER
depends on VDPA
help
This kernel module can be loaded in host kernel to accelerate
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 1f0ca6e44410..34aec4ba331e 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -159,8 +159,8 @@ vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
/**
- * vhost_iotlb_itree_first - return the next overlapped range
- * @iotlb: the IOTLB
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the starting map node
* @start: start of IOVA range
* @end: end of IOVA range
*/
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index eea902b83afe..531a00d703cd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -73,7 +73,7 @@ enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
};
enum {
@@ -1615,21 +1615,6 @@ done:
return err;
}
-static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
-{
- int i;
-
- mutex_lock(&n->dev.mutex);
- for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
- mutex_lock(&n->vqs[i].vq.mutex);
- n->vqs[i].vq.acked_backend_features = features;
- mutex_unlock(&n->vqs[i].vq.mutex);
- }
- mutex_unlock(&n->dev.mutex);
-
- return 0;
-}
-
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1653,7 +1638,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
!vhost_log_access_ok(&n->dev))
goto out_unlock;
- if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
if (vhost_init_device_iotlb(&n->dev, true))
goto out_unlock;
}
@@ -1730,7 +1715,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
if (features & ~VHOST_NET_BACKEND_FEATURES)
return -EOPNOTSUPP;
- return vhost_net_set_backend_features(n, features);
+ vhost_set_backend_features(&n->dev, features);
+ return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index a54b60d6623f..3fab94f88894 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -27,37 +27,11 @@
#include "vhost.h"
enum {
- VHOST_VDPA_FEATURES =
- (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1ULL << VIRTIO_F_ANY_LAYOUT) |
- (1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM) |
- (1ULL << VIRTIO_F_RING_PACKED) |
- (1ULL << VIRTIO_F_ORDER_PLATFORM) |
- (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
- (1ULL << VIRTIO_RING_F_EVENT_IDX),
-
- VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
- (1ULL << VIRTIO_NET_F_CSUM) |
- (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
- (1ULL << VIRTIO_NET_F_MTU) |
- (1ULL << VIRTIO_NET_F_MAC) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
- (1ULL << VIRTIO_NET_F_GUEST_ECN) |
- (1ULL << VIRTIO_NET_F_GUEST_UFO) |
- (1ULL << VIRTIO_NET_F_HOST_TSO4) |
- (1ULL << VIRTIO_NET_F_HOST_TSO6) |
- (1ULL << VIRTIO_NET_F_HOST_ECN) |
- (1ULL << VIRTIO_NET_F_HOST_UFO) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VIRTIO_NET_F_STATUS) |
- (1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
+ VHOST_VDPA_BACKEND_FEATURES =
+ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
-/* Currently, only network backend w/o multiqueue is supported. */
-#define VHOST_VDPA_VQ_MAX 2
-
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
struct vhost_vdpa {
@@ -73,16 +47,13 @@ struct vhost_vdpa {
int virtio_id;
int minor;
struct eventfd_ctx *config_ctx;
+ int in_batch;
};
static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
-static const u64 vhost_vdpa_features[] = {
- [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
-};
-
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -96,7 +67,7 @@ static void handle_vq_kick(struct vhost_work *work)
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
struct vhost_virtqueue *vq = private;
- struct eventfd_ctx *call_ctx = vq->call_ctx;
+ struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
eventfd_signal(call_ctx, 1);
@@ -115,12 +86,45 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
return IRQ_HANDLED;
}
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+ struct vdpa_device *vdpa = v->vdpa;
+ int ret, irq;
+
+ if (!ops->get_vq_irq)
+ return;
+
+ irq = ops->get_vq_irq(vdpa, qid);
+ spin_lock(&vq->call_ctx.ctx_lock);
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ if (!vq->call_ctx.ctx || irq < 0) {
+ spin_unlock(&vq->call_ctx.ctx_lock);
+ return;
+ }
+
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ vq->call_ctx.producer.irq = irq;
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+
+ spin_lock(&vq->call_ctx.ctx_lock);
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- const struct vdpa_config_ops *ops = vdpa->config;
- ops->set_status(vdpa, 0);
+ vdpa_reset(vdpa);
+ v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -155,11 +159,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- u8 status;
+ u8 status, status_old;
+ int nvqs = v->nvqs;
+ u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
return -EFAULT;
+ status_old = ops->get_status(vdpa);
+
/*
* Userspace shouldn't remove status bits unless reset the
* status to 0.
@@ -169,6 +177,14 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
ops->set_status(vdpa, status);
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_setup_vq_irq(v, i);
+
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+
return 0;
}
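For context on the new DRIVER_OK handling above, here is a hedged userspace sketch (not from the patch) of the status sequence that triggers the irq bypass setup and teardown; it assumes an already-opened vhost-vdpa fd and uses only the ioctl and status bits defined in <linux/vhost.h> and <linux/virtio_config.h>.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_config.h>

int vdpa_start(int fd)
{
	uint8_t status = 0;

	if (ioctl(fd, VHOST_VDPA_SET_STATUS, &status))	/* reset */
		return -1;

	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
		 VIRTIO_CONFIG_S_FEATURES_OK;
	if (ioctl(fd, VHOST_VDPA_SET_STATUS, &status))
		return -1;

	/* ... virtqueues configured here ... then the DRIVER_OK transition
	 * makes vhost_vdpa_set_status() register the irq bypass producers.
	 */
	status |= VIRTIO_CONFIG_S_DRIVER_OK;
	return ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
}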
@@ -196,7 +212,6 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
- const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
@@ -209,7 +224,7 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
if (!buf)
return -ENOMEM;
- ops->get_config(vdpa, config.off, buf, config.len);
+ vdpa_get_config(vdpa, config.off, buf, config.len);
if (copy_to_user(c->buf, buf, config.len)) {
kvfree(buf);
@@ -255,7 +270,6 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
u64 features;
features = ops->get_features(vdpa);
- features &= vhost_vdpa_features[v->virtio_id];
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
@@ -279,10 +293,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~vhost_vdpa_features[v->virtio_id])
- return -EINVAL;
-
- if (ops->set_features(vdpa, features))
+ if (vdpa_set_features(vdpa, features))
return -EINVAL;
return 0;
@@ -332,14 +343,18 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_vq_state vq_state;
struct vdpa_callback cb;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
+ u64 __user *featurep = argp;
+ u64 features;
u32 idx;
long r;
@@ -353,15 +368,32 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
idx = array_index_nospec(idx, v->nvqs);
vq = &v->vqs[idx];
- if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+ switch (cmd) {
+ case VHOST_VDPA_SET_VRING_ENABLE:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
- }
+ case VHOST_GET_VRING_BASE:
+ r = ops->get_vq_state(v->vdpa, idx, &vq_state);
+ if (r)
+ return r;
- if (cmd == VHOST_GET_VRING_BASE)
- vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+ vq->last_avail_idx = vq_state.avail_index;
+ break;
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VDPA_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&v->vdev, features);
+ return 0;
+ }
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
if (r)
@@ -377,12 +409,13 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
+ vq_state.avail_index = vq->last_avail_idx;
+ if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL;
break;
case VHOST_SET_VRING_CALL:
- if (vq->call_ctx) {
+ if (vq->call_ctx.ctx) {
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
} else {
@@ -390,6 +423,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
cb.private = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
+ vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -519,13 +553,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
if (r)
return r;
- if (ops->dma_map)
+ if (ops->dma_map) {
r = ops->dma_map(vdpa, iova, size, pa, perm);
- else if (ops->set_map)
- r = ops->set_map(vdpa, dev->iotlb);
- else
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ r = ops->set_map(vdpa, dev->iotlb);
+ } else {
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
+ }
return r;
}
@@ -538,12 +574,14 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
- if (ops->dma_map)
+ if (ops->dma_map) {
ops->dma_unmap(vdpa, iova, size);
- else if (ops->set_map)
- ops->set_map(vdpa, dev->iotlb);
- else
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ ops->set_map(vdpa, dev->iotlb);
+ } else {
iommu_unmap(v->domain, iova, size);
+ }
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
@@ -636,6 +674,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
r = vhost_dev_check_owner(dev);
@@ -649,6 +689,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
case VHOST_IOTLB_INVALIDATE:
vhost_vdpa_unmap(v, msg->iova, msg->size);
break;
+ case VHOST_IOTLB_BATCH_BEGIN:
+ v->in_batch = true;
+ break;
+ case VHOST_IOTLB_BATCH_END:
+ if (v->in_batch && ops->set_map)
+ ops->set_map(vdpa, dev->iotlb);
+ v->in_batch = false;
+ break;
default:
r = -EINVAL;
break;
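The BATCH_BEGIN/END handling above lets a driver that implements set_map() see one atomic update instead of one per mapping. A hedged userspace sketch (not part of the patch) of how a client would use it, assuming VHOST_BACKEND_F_IOTLB_BATCH was acked and that <linux/vhost_types.h> provides the batch message types:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>

static int send_iotlb_msg(int fd, uint8_t type, uint64_t iova,
			  uint64_t size, uint64_t uaddr)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = type;
	msg.iotlb.iova = iova;
	msg.iotlb.size = size;
	msg.iotlb.uaddr = uaddr;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

int map_regions_batched(int fd, const uint64_t *iova, const uint64_t *uaddr,
			const uint64_t *size, int n)
{
	int i, r = 0;

	r |= send_iotlb_msg(fd, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0);
	for (i = 0; i < n; i++)
		r |= send_iotlb_msg(fd, VHOST_IOTLB_UPDATE,
				    iova[i], size[i], uaddr[i]);
	/* The kernel flushes the whole batch to set_map() on BATCH_END. */
	r |= send_iotlb_msg(fd, VHOST_IOTLB_BATCH_END, 0, 0, 0);
	return r;
}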
@@ -765,6 +813,18 @@ err:
return r;
}
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ for (i = 0; i < v->nvqs; i++) {
+ vq = &v->vqs[i];
+ if (vq->call_ctx.producer.irq)
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ }
+}
+
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v = filep->private_data;
@@ -777,6 +837,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
+ vhost_vdpa_clean_irq(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
@@ -872,7 +933,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa *v;
- int minor, nvqs = VHOST_VDPA_VQ_MAX;
+ int minor;
int r;
/* Currently, we only accept the network devices. */
@@ -893,14 +954,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
atomic_set(&v->opened, 0);
v->minor = minor;
v->vdpa = vdpa;
- v->nvqs = nvqs;
+ v->nvqs = vdpa->nvqs;
v->virtio_id = ops->get_device_id(vdpa);
device_initialize(&v->dev);
v->dev.release = vhost_vdpa_release_dev;
v->dev.parent = &vdpa->dev;
v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
- v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
+ v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
GFP_KERNEL);
if (!v->vqs) {
r = -ENOMEM;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 74d135ee7e26..5857d4eec9d7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
__vhost_vq_meta_reset(d->vqs[i]);
}
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+ call_ctx->ctx = NULL;
+ memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+ spin_lock_init(&call_ctx->ctx_lock);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->kick = NULL;
- vq->call_ctx = NULL;
vq->log_ctx = NULL;
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
+ vhost_vring_call_reset(&vq->call_ctx);
__vhost_vq_meta_reset(vq);
}
@@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
- if (dev->vqs[i]->call_ctx)
- eventfd_ctx_put(dev->vqs[i]->call_ctx);
+ if (dev->vqs[i]->call_ctx.ctx)
+ eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
@@ -1405,7 +1412,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
- mem.nregions * sizeof *m->regions)) {
+ flex_array_size(newmem, regions, mem.nregions))) {
kvfree(newmem);
return -EFAULT;
}
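The flex_array_size() conversion above is an overflow-safety change: the macro from <linux/overflow.h> computes count * sizeof(element) for a flexible array member and saturates to SIZE_MAX instead of wrapping, so an oversized nregions cannot shrink the copy. A hypothetical illustration of the same pattern (the demo_* names are not from the kernel):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_table {
	u32 nentries;
	u64 entries[];			/* flexible array member */
};

/* flex_array_size() saturates on overflow, so the copy cannot be truncated. */
static int demo_copy_entries(struct demo_table *t, const void __user *src)
{
	if (copy_from_user(t->entries, src,
			   flex_array_size(t, entries, t->nentries)))
		return -EFAULT;
	return 0;
}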
@@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = PTR_ERR(ctx);
break;
}
- swap(ctx, vq->call_ctx);
+
+ spin_lock(&vq->call_ctx.ctx_lock);
+ swap(ctx, vq->call_ctx.ctx);
+ spin_unlock(&vq->call_ctx.ctx_lock);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
@@ -2435,8 +2445,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the Guest tell them we used something up. */
- if (vq->call_ctx && vhost_notify(dev, vq))
- eventfd_signal(vq->call_ctx, 1);
+ if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+ eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
@@ -2576,6 +2586,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_backend_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index c8e96a095d3b..9032d3c2a9f4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -13,6 +13,7 @@
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -60,6 +61,12 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};
+struct vhost_vring_call {
+ struct eventfd_ctx *ctx;
+ struct irq_bypass_producer producer;
+ spinlock_t ctx_lock;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -72,7 +79,7 @@ struct vhost_virtqueue {
vring_used_t __user *used;
const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
- struct eventfd_ctx *call_ctx;
+ struct vhost_vring_call call_ctx;
struct eventfd_ctx *error_ctx;
struct eventfd_ctx *log_ctx;
@@ -207,6 +214,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
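A small sketch of how a backend consumes the bits stored by the new vhost_set_backend_features() helper; demo_use_msg_v2() is hypothetical, while vhost_backend_has_feature() is the existing per-virtqueue accessor in this header.

#include "vhost.h"

static bool demo_use_msg_v2(struct vhost_virtqueue *vq)
{
	/* True once userspace acked VHOST_BACKEND_F_IOTLB_MSG_V2. */
	return vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
}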
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 20d96a5ac384..25e409bbb1a2 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -121,18 +121,7 @@ out:
static int pm860x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return pm860x_backlight_set(bl, brightness);
+ return pm860x_backlight_set(bl, backlight_get_brightness(bl));
}
static int pm860x_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 7d22d7377606..87f9fc238d28 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX
To compile this driver as a module, choose M here: the module will
be called ep93xx_bl.
-config BACKLIGHT_GENERIC
- tristate "Generic (aka Sharp Corgi) Backlight Driver"
- default y
- help
- Say y to enable the generic platform backlight driver previously
- known as the Corgi backlight driver. If you have a Sharp Zaurus
- SL-C7xx, SL-Cxx00 or SL-6000x say y.
-
config BACKLIGHT_IPAQ_MICRO
tristate "iPAQ microcontroller backlight driver"
depends on MFD_IPAQ_MICRO
@@ -386,13 +378,6 @@ config BACKLIGHT_LP8788
help
This supports TI LP8788 backlight driver.
-config BACKLIGHT_OT200
- tristate "Backlight driver for ot200 visualisation device"
- depends on CS5535_MFGPT && GPIO_CS5535
- help
- To compile this driver as a module, choose M here: the module will be
- called ot200_bl.
-
config BACKLIGHT_PANDORA
tristate "Backlight driver for Pandora console"
depends on TWL4030_CORE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 0c1a1524627a..13463b99f1f9 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
-obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
@@ -45,7 +44,6 @@ obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
-obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 0f63f76723a5..686988c3df3a 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -65,15 +65,7 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
static int adp5520_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp5520_bl_set(bl, brightness);
+ return adp5520_bl_set(bl, backlight_get_brightness(bl));
}
static int adp5520_bl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 19968104fc47..8ec19425671f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -361,15 +361,7 @@ static int adp8860_bl_set(struct backlight_device *bl, int brightness)
static int adp8860_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp8860_bl_set(bl, brightness);
+ return adp8860_bl_set(bl, backlight_get_brightness(bl));
}
static int adp8860_bl_get_brightness(struct backlight_device *bl)
@@ -689,7 +681,7 @@ static int adp8860_probe(struct i2c_client *client,
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
data->gdwn_dis = !!pdata->gdwn_dis;
- /* fall through */
+ fallthrough;
case ADP8860_MANUFID:
data->en_ambl_sens = !!pdata->en_ambl_sens;
break;
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 4c0032010cfe..8b5213a39527 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -399,15 +399,7 @@ static int adp8870_bl_set(struct backlight_device *bl, int brightness)
static int adp8870_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp8870_bl_set(bl, brightness);
+ return adp8870_bl_set(bl, backlight_get_brightness(bl));
}
static int adp8870_bl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 33f0f0f2e8b3..3b60019cdc2b 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -104,17 +104,10 @@ static int as3711_bl_update_status(struct backlight_device *bl)
struct as3711_bl_data *data = bl_get_data(bl);
struct as3711_bl_supply *supply = to_supply(data);
struct as3711 *as3711 = supply->as3711;
- int brightness = bl->props.brightness;
+ int brightness;
int ret = 0;
- dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n",
- __func__, bl->props.brightness, bl->props.power,
- bl->props.fb_blank, bl->props.state);
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ brightness = backlight_get_brightness(bl);
if (data->type == AS3711_BL_SU1) {
ret = as3711_set_brightness_v(as3711, brightness,
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 92d80aa0c0ef..537fe1b376ad 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -22,6 +22,47 @@
#include <asm/backlight.h>
#endif
+/**
+ * DOC: overview
+ *
+ * The backlight core supports implementing backlight drivers.
+ *
+ * A backlight driver registers a backlight device using
+ * devm_backlight_device_register(). The properties of the backlight
+ * device, such as type and max_brightness, must be specified.
+ * When the core detects changes in, for example, brightness or power state,
+ * the update_status() operation is called. The backlight driver shall
+ * implement this operation and use it to adjust the backlight.
+ *
+ * Several sysfs attributes are provided by the backlight core::
+ *
+ * - brightness R/W, set the requested brightness level
+ * - actual_brightness RO, the brightness level used by the HW
+ * - max_brightness RO, the maximum brightness level supported
+ *
+ * See Documentation/ABI/stable/sysfs-class-backlight for the full list.
+ *
+ * The backlight can be adjusted using the sysfs interface, and
+ * the backlight driver may also support adjusting backlight using
+ * a hot-key or some other platform or firmware specific way.
+ *
+ * The driver must implement the get_brightness() operation if
+ * the HW does not support all the levels that can be specified in
+ * brightness, thus providing user-space access to the actual level
+ * via the actual_brightness attribute.
+ *
+ * When the backlight changes, this is reported to user-space using
+ * an uevent connected to the actual_brightness attribute.
+ * When brightness is set by platform-specific means, for example
+ * a hot-key to adjust the backlight, the driver must notify the backlight
+ * core that brightness has changed using backlight_force_update().
+ *
+ * The backlight driver core receives notifications from fbdev and,
+ * if the event is FB_EVENT_BLANK and the value of blank, from the
+ * FBIOBLANK ioctl, results in a change in the backlight state, the
+ * update_status() operation is called.
+ */
+
static struct list_head backlight_dev_list;
static struct mutex backlight_dev_list_mutex;
static struct blocking_notifier_head backlight_notifier;
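To make the overview above concrete, here is a minimal driver sketch (not part of the patch); the foo_* names and the hardware write are hypothetical, only the backlight API calls are real.

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_bl {
	struct device *dev;
};

static int foo_hw_set_level(struct foo_bl *foo, int level)
{
	/* program the hypothetical backlight hardware here */
	return 0;
}

static int foo_bl_update_status(struct backlight_device *bl)
{
	struct foo_bl *foo = bl_get_data(bl);

	/* backlight_get_brightness() returns 0 when blanked or suspended */
	return foo_hw_set_level(foo, backlight_get_brightness(bl));
}

static const struct backlight_ops foo_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= foo_bl_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
		.brightness = 255,
	};
	struct backlight_device *bl;
	struct foo_bl *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;
	foo->dev = &pdev->dev;

	bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
					    &pdev->dev, foo, &foo_bl_ops,
					    &props);
	return PTR_ERR_OR_ZERO(bl);
}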
@@ -40,9 +81,17 @@ static const char *const backlight_scale_types[] = {
#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is and necessary, we're switching backlight power as well ...
+/*
+ * fb_notifier_callback
+ *
+ * This callback gets called when something important happens inside a
+ * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK,
+ * which is reported to the driver using backlight_update_status()
+ * as a state change.
+ *
+ * There may be several fbdevs connected to the backlight device,
+ * in which case each of them is tracked. A state change is only reported
+ * if there is a change in the backlight state for the specified fbdev.
*/
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
@@ -58,28 +107,29 @@ static int fb_notifier_callback(struct notifier_block *self,
bd = container_of(self, struct backlight_device, fb_notif);
mutex_lock(&bd->ops_lock);
- if (bd->ops)
- if (!bd->ops->check_fb ||
- bd->ops->check_fb(bd, evdata->info)) {
- fb_blank = *(int *)evdata->data;
- if (fb_blank == FB_BLANK_UNBLANK &&
- !bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = true;
- if (!bd->use_count++) {
- bd->props.state &= ~BL_CORE_FBBLANK;
- bd->props.fb_blank = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- }
- } else if (fb_blank != FB_BLANK_UNBLANK &&
- bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = false;
- if (!(--bd->use_count)) {
- bd->props.state |= BL_CORE_FBBLANK;
- bd->props.fb_blank = fb_blank;
- backlight_update_status(bd);
- }
- }
+
+ if (!bd->ops)
+ goto out;
+ if (bd->ops->check_fb && !bd->ops->check_fb(bd, evdata->info))
+ goto out;
+
+ fb_blank = *(int *)evdata->data;
+ if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = true;
+ if (!bd->use_count++) {
+ bd->props.state &= ~BL_CORE_FBBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
}
+ } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = false;
+ if (!(--bd->use_count)) {
+ bd->props.state |= BL_CORE_FBBLANK;
+ bd->props.fb_blank = fb_blank;
+ backlight_update_status(bd);
+ }
+ }
+out:
mutex_unlock(&bd->ops_lock);
return 0;
}
@@ -320,9 +370,13 @@ ATTRIBUTE_GROUPS(bl_device);
* backlight_force_update - tell the backlight subsystem that hardware state
* has changed
* @bd: the backlight device to update
+ * @reason: reason for update
*
* Updates the internal state of the backlight in response to a hardware event,
- * and generate a uevent to notify userspace
+ * and generates an uevent to notify userspace. A backlight driver shall call
+ * backlight_force_update() when the backlight is changed using, for example,
+ * a hot-key. The updated brightness is read using get_brightness() and the
+ * brightness value is reported using an uevent.
*/
void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason)
@@ -335,19 +389,7 @@ void backlight_force_update(struct backlight_device *bd,
}
EXPORT_SYMBOL(backlight_force_update);
-/**
- * backlight_device_register - create and register a new object of
- * backlight_device class.
- * @name: the name of the new object(must be the same as the name of the
- * respective framebuffer device).
- * @parent: a pointer to the parent device
- * @devdata: an optional pointer to be stored for private driver use. The
- * methods may retrieve it by using bl_get_data(bd).
- * @ops: the backlight operations structure.
- *
- * Creates and registers new backlight device. Returns either an
- * ERR_PTR() or a pointer to the newly allocated device.
- */
+/* deprecated - use devm_backlight_device_register() */
struct backlight_device *backlight_device_register(const char *name,
struct device *parent, void *devdata, const struct backlight_ops *ops,
const struct backlight_properties *props)
@@ -414,6 +456,15 @@ struct backlight_device *backlight_device_register(const char *name,
}
EXPORT_SYMBOL(backlight_device_register);
+/**
+ * backlight_device_get_by_type - find first backlight device of a type
+ * @type: the type of backlight device
+ *
+ * Look up the first backlight device of the specified type
+ *
+ * RETURNS:
+ *
+ * Pointer to backlight device if any was found. Otherwise NULL.
+ */
struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
{
bool found = false;
@@ -453,12 +504,7 @@ struct backlight_device *backlight_device_get_by_name(const char *name)
}
EXPORT_SYMBOL(backlight_device_get_by_name);
-/**
- * backlight_device_unregister - unregisters a backlight device object.
- * @bd: the backlight device object to be unregistered and freed.
- *
- * Unregisters a previously registered via backlight_device_register object.
- */
+/* deprecated - use devm_backlight_device_unregister() */
void backlight_device_unregister(struct backlight_device *bd)
{
if (!bd)
@@ -506,10 +552,12 @@ static int devm_backlight_device_match(struct device *dev, void *res,
* backlight_register_notifier - get notified of backlight (un)registration
* @nb: notifier block with the notifier to call on backlight (un)registration
*
- * @return 0 on success, otherwise a negative error code
- *
* Register a notifier to get notified when backlight devices get registered
* or unregistered.
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
*/
int backlight_register_notifier(struct notifier_block *nb)
{
@@ -521,10 +569,12 @@ EXPORT_SYMBOL(backlight_register_notifier);
* backlight_unregister_notifier - unregister a backlight notifier
* @nb: notifier block to unregister
*
- * @return 0 on success, otherwise a negative error code
- *
* Register a notifier to get notified when backlight devices get registered
* or unregistered.
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
*/
int backlight_unregister_notifier(struct notifier_block *nb)
{
@@ -533,19 +583,21 @@ int backlight_unregister_notifier(struct notifier_block *nb)
EXPORT_SYMBOL(backlight_unregister_notifier);
/**
- * devm_backlight_device_register - resource managed backlight_device_register()
+ * devm_backlight_device_register - register a new backlight device
* @dev: the device to register
* @name: the name of the device
- * @parent: a pointer to the parent device
+ * @parent: a pointer to the parent device (often the same as @dev)
* @devdata: an optional pointer to be stored for private driver use
* @ops: the backlight operations structure
* @props: the backlight properties
*
- * @return a struct backlight on success, or an ERR_PTR on error
+ * Creates and registers a new backlight device. When a backlight device
+ * is registered, the configuration must be specified in the @props
+ * parameter. See the description of &backlight_properties.
+ *
+ * RETURNS:
*
- * Managed backlight_device_register(). The backlight_device returned
- * from this function are automatically freed on driver detach.
- * See backlight_device_register() for more information.
+ * struct backlight on success, or an ERR_PTR on error
*/
struct backlight_device *devm_backlight_device_register(struct device *dev,
const char *name, struct device *parent, void *devdata,
@@ -573,13 +625,13 @@ struct backlight_device *devm_backlight_device_register(struct device *dev,
EXPORT_SYMBOL(devm_backlight_device_register);
/**
- * devm_backlight_device_unregister - resource managed backlight_device_unregister()
+ * devm_backlight_device_unregister - unregister backlight device
* @dev: the device to unregister
* @bd: the backlight device to unregister
*
- * Deallocated a backlight allocated with devm_backlight_device_register().
+ * Deallocates a backlight allocated with devm_backlight_device_register().
* Normally this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * code will ensure that the resources are freed.
*/
void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd)
@@ -621,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node)
EXPORT_SYMBOL(of_find_backlight_by_node);
#endif
-/**
- * of_find_backlight - Get backlight device
- * @dev: Device
- *
- * This function looks for a property named 'backlight' on the DT node
- * connected to @dev and looks up the backlight device.
- *
- * Call backlight_put() to drop the reference on the backlight device.
- *
- * Returns:
- * A pointer to the backlight device if found.
- * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
- * device is found.
- * NULL if there's no backlight property.
- */
-struct backlight_device *of_find_backlight(struct device *dev)
+static struct backlight_device *of_find_backlight(struct device *dev)
{
struct backlight_device *bd = NULL;
struct device_node *np;
@@ -662,20 +699,29 @@ struct backlight_device *of_find_backlight(struct device *dev)
return bd;
}
-EXPORT_SYMBOL(of_find_backlight);
static void devm_backlight_release(void *data)
{
- backlight_put(data);
+ struct backlight_device *bd = data;
+
+ if (bd)
+ put_device(&bd->dev);
}
/**
- * devm_of_find_backlight - Resource-managed of_find_backlight()
- * @dev: Device
+ * devm_of_find_backlight - find backlight for a device
+ * @dev: the device
*
- * Device managed version of of_find_backlight().
- * The reference on the backlight device is automatically
+ * This function looks for a property named 'backlight' on the DT node
+ * connected to @dev and looks up the backlight device. The lookup is
+ * device managed, so the reference to the backlight device is automatically
* dropped on driver detach.
+ *
+ * RETURNS:
+ *
+ * A pointer to the backlight device if found.
+ * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
+ * device is found. NULL if there's no backlight property.
*/
struct backlight_device *devm_of_find_backlight(struct device *dev)
{
@@ -687,7 +733,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev)
return bd;
ret = devm_add_action(dev, devm_backlight_release, bd);
if (ret) {
- backlight_put(bd);
+ put_device(&bd->dev);
return ERR_PTR(ret);
}
return bd;
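With of_find_backlight() made static above, devm_of_find_backlight() is the entry point drivers are expected to use. A hedged usage sketch (not from the patch; the bar_* names are hypothetical) from a display driver's probe path:

#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/err.h>

static int bar_panel_attach_backlight(struct device *dev,
				      struct backlight_device **out)
{
	struct backlight_device *bl;

	bl = devm_of_find_backlight(dev);
	if (IS_ERR(bl))		/* -EPROBE_DEFER if not registered yet */
		return PTR_ERR(bl);

	*out = bl;		/* may be NULL: no 'backlight' property */
	if (bl) {
		bl->props.brightness = bl->props.max_brightness;
		backlight_enable(bl);
	}
	return 0;
}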
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index d5d5fb457e78..515184fbe33a 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -82,12 +82,7 @@ static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data)
static int bd6107_backlight_update_status(struct backlight_device *backlight)
{
struct bd6107 *bd = bl_get_data(backlight);
- int brightness = backlight->props.brightness;
-
- if (backlight->props.power != FB_BLANK_UNBLANK ||
- backlight->props.fb_blank != FB_BLANK_UNBLANK ||
- backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(backlight);
if (brightness) {
bd6107_write(bd, BD6107_PORTSEL, BD6107_PORTSEL_LEDM(2) |
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 25ef0cbd7583..33f5d80495e6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -420,13 +420,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
static int corgi_bl_update_status(struct backlight_device *bd)
{
struct corgi_lcd *lcd = bl_get_data(bd);
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
-
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
+ int intensity = backlight_get_brightness(bd);
if (corgibl_flags & CORGIBL_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 4624b7b7c6a6..4ad0a72531fe 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -59,26 +59,18 @@ struct cr_panel {
static int cr_backlight_set_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
u32 addr = gpio_bar + CRVML_PANEL_PORT;
u32 cur = inl(addr);
- if (bd->props.power == FB_BLANK_UNBLANK)
- intensity = FB_BLANK_UNBLANK;
- if (bd->props.fb_blank == FB_BLANK_UNBLANK)
- intensity = FB_BLANK_UNBLANK;
- if (bd->props.power == FB_BLANK_POWERDOWN)
- intensity = FB_BLANK_POWERDOWN;
- if (bd->props.fb_blank == FB_BLANK_POWERDOWN)
- intensity = FB_BLANK_POWERDOWN;
-
- if (intensity == FB_BLANK_UNBLANK) { /* FULL ON */
- cur &= ~CRVML_BACKLIGHT_OFF;
- outl(cur, addr);
- } else if (intensity == FB_BLANK_POWERDOWN) { /* OFF */
+ if (backlight_get_brightness(bd) == 0) {
+ /* OFF */
cur |= CRVML_BACKLIGHT_OFF;
outl(cur, addr);
- } /* anything else, don't bother */
+ } else {
+ /* FULL ON */
+ cur &= ~CRVML_BACKLIGHT_OFF;
+ outl(cur, addr);
+ }
return 0;
}
@@ -90,9 +82,9 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
u8 intensity;
if (cur & CRVML_BACKLIGHT_OFF)
- intensity = FB_BLANK_POWERDOWN;
+ intensity = 0;
else
- intensity = FB_BLANK_UNBLANK;
+ intensity = 1;
return intensity;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 62540e4bdedb..71f21bbc7a9f 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -77,18 +77,7 @@ static int da903x_backlight_set(struct backlight_device *bl, int brightness)
static int da903x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return da903x_backlight_set(bl, brightness);
+ return da903x_backlight_set(bl, backlight_get_brightness(bl));
}
static int da903x_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 4149e0b2f83c..2387009d452d 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -36,13 +36,7 @@ static int ep93xxbl_set(struct backlight_device *bl, int brightness)
static int ep93xxbl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return ep93xxbl_set(bl, brightness);
+ return ep93xxbl_set(bl, backlight_get_brightness(bl));
}
static int ep93xxbl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
deleted file mode 100644
index 8fe63dbc8590..000000000000
--- a/drivers/video/backlight/generic_bl.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic Backlight Driver
- *
- * Copyright (c) 2004-2008 Richard Purdie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-
-static int genericbl_intensity;
-static struct backlight_device *generic_backlight_device;
-static struct generic_bl_info *bl_machinfo;
-
-static int genericbl_send_intensity(struct backlight_device *bd)
-{
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.state & BL_CORE_FBBLANK)
- intensity = 0;
- if (bd->props.state & BL_CORE_SUSPENDED)
- intensity = 0;
-
- bl_machinfo->set_bl_intensity(intensity);
-
- genericbl_intensity = intensity;
-
- if (bl_machinfo->kick_battery)
- bl_machinfo->kick_battery();
-
- return 0;
-}
-
-static int genericbl_get_intensity(struct backlight_device *bd)
-{
- return genericbl_intensity;
-}
-
-static const struct backlight_ops genericbl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = genericbl_get_intensity,
- .update_status = genericbl_send_intensity,
-};
-
-static int genericbl_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev);
- const char *name = "generic-bl";
- struct backlight_device *bd;
-
- bl_machinfo = machinfo;
- if (!machinfo->limit_mask)
- machinfo->limit_mask = -1;
-
- if (machinfo->name)
- name = machinfo->name;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = machinfo->max_intensity;
- bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
- NULL, &genericbl_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- platform_set_drvdata(pdev, bd);
-
- bd->props.power = FB_BLANK_UNBLANK;
- bd->props.brightness = machinfo->default_intensity;
- backlight_update_status(bd);
-
- generic_backlight_device = bd;
-
- dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n");
- return 0;
-}
-
-static int genericbl_remove(struct platform_device *pdev)
-{
- struct backlight_device *bd = platform_get_drvdata(pdev);
-
- bd->props.power = 0;
- bd->props.brightness = 0;
- backlight_update_status(bd);
-
- dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n");
- return 0;
-}
-
-static struct platform_driver genericbl_driver = {
- .probe = genericbl_probe,
- .remove = genericbl_remove,
- .driver = {
- .name = "generic-bl",
- },
-};
-
-module_platform_driver(genericbl_driver);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Generic Backlight Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 75409ddfba3e..6f78d928f054 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -21,24 +21,11 @@ struct gpio_backlight {
struct gpio_desc *gpiod;
};
-static int gpio_backlight_get_next_brightness(struct backlight_device *bl)
-{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
-
- return brightness;
-}
-
static int gpio_backlight_update_status(struct backlight_device *bl)
{
struct gpio_backlight *gbl = bl_get_data(bl);
- int brightness = gpio_backlight_get_next_brightness(bl);
- gpiod_set_value_cansleep(gbl->gpiod, brightness);
+ gpiod_set_value_cansleep(gbl->gpiod, backlight_get_brightness(bl));
return 0;
}
@@ -108,7 +95,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
bl->props.brightness = 1;
- init_brightness = gpio_backlight_get_next_brightness(bl);
+ init_brightness = backlight_get_brightness(bl);
ret = gpiod_direction_output(gbl->gpiod, init_brightness);
if (ret) {
dev_err(dev, "failed to set initial brightness\n");
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 8ea42b8d9bc8..9123c33def05 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -33,12 +33,8 @@ static void hp680bl_send_intensity(struct backlight_device *bd)
{
unsigned long flags;
u16 v;
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (hp680bl_suspended)
intensity = 0;
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index 9c5aa3fbb284..328aba9cddad 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -107,6 +107,8 @@
* lower frequency when the registers are read/written.
* The macro sets the frequency in the spi_transfer structure if
* the frequency exceeds the maximum value.
+ * @s: pointer to an SPI device
+ * @x: pointer to the read/write buffer pair
*/
#define CHECK_FREQ_REG(s, x) \
do { \
@@ -121,7 +123,7 @@
#define set_tx_byte(b) (tx_invert ? ~(b) : b)
-/**
+/*
* ili922x_id - id as set by manufacturer
*/
static int ili922x_id = 1;
@@ -130,7 +132,7 @@ module_param(ili922x_id, int, 0);
static int tx_invert;
module_param(tx_invert, int, 0);
-/**
+/*
* driver's private structure
*/
struct ili922x {
@@ -293,6 +295,8 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
#ifdef DEBUG
/**
* ili922x_reg_dump - dump all registers
+ *
+ * @spi: pointer to an SPI device
*/
static void ili922x_reg_dump(struct spi_device *spi)
{
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index f0385f9cf9da..996f7ba3b373 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -54,7 +54,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
jornada_ssp_start();
/* If backlight is off then really turn it off */
- if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
+ if (backlight_is_blank(bd)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
dev_info(&bd->dev, "brightness off timeout\n");
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 1dfe13c18925..55794b239cff 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -87,12 +87,8 @@ static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
static int kb3886bl_send_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (kb3886bl_flags & KB3886BL_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 78b033358625..db56e465aaff 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -179,6 +179,7 @@ ATTRIBUTE_GROUPS(lcd_device);
* lcd_device_register - register a new object of lcd_device class.
* @name: the name of the new object(must be the same as the name of the
* respective framebuffer device).
+ * @parent: pointer to the parent's struct device.
* @devdata: an optional pointer to be stored in the device. The
* methods may retrieve it by using lcd_get_data(ld).
* @ops: the lcd operations structure.
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index 3f66549997c8..f54d256e2d54 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -54,12 +54,7 @@ static void led_bl_power_off(struct led_bl_data *priv)
static int led_bl_update_status(struct backlight_device *bl)
{
struct led_bl_data *priv = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
if (brightness > 0)
led_bl_set_brightness(priv, brightness);
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index ee09d1bd02b9..1df1b6643c0b 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -39,14 +39,8 @@ static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
static int lm3533_bl_update_status(struct backlight_device *bd)
{
struct lm3533_bl *bl = bl_get_data(bd);
- int brightness = bd->props.brightness;
- if (bd->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+ return lm3533_ctrlbank_set_brightness(&bl->cb, backlight_get_brightness(bd));
}
static int lm3533_bl_get_brightness(struct backlight_device *bd)
@@ -235,7 +229,7 @@ static struct attribute *lm3533_bl_attributes[] = {
static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct lm3533_bl *bl = dev_get_drvdata(dev);
umode_t mode = attr->mode;
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index ee320883b710..e88a2b0e5904 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -391,7 +391,7 @@ static int lm3630a_parse_led_sources(struct fwnode_handle *node,
return ret;
for (i = 0; i < num_sources; i++) {
- if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1)
+ if (sources[i] != LM3630A_SINK_0 && sources[i] != LM3630A_SINK_1)
return -EINVAL;
ret |= BIT(sources[i]);
@@ -412,7 +412,7 @@ static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata,
if (ret)
return ret;
- if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1)
+ if (bank != LM3630A_BANK_0 && bank != LM3630A_BANK_1)
return -EINVAL;
led_sources = lm3630a_parse_led_sources(node, BIT(bank));
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index 8ae32e3573c1..f949b66dce1b 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -9,7 +9,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -89,14 +88,6 @@ static const unsigned char seq_rgb_gamma[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-static const unsigned char seq_up_dn[] = {
- 0x36, 0x10,
-};
-
-static const unsigned char seq_sleep_in[] = {
- 0x10,
-};
-
static const unsigned char seq_sleep_out[] = {
0x11,
};
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index cdc02e04f89d..297ee2e1ab0b 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -111,12 +111,8 @@ static int current_intensity;
static int locomolcd_set_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (locomolcd_flags & LOCOMOLCD_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index c6ad73a784e2..1842ae9a55f8 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -46,12 +46,7 @@ static int lv5207lp_write(struct lv5207lp *lv, u8 reg, u8 data)
static int lv5207lp_backlight_update_status(struct backlight_device *backlight)
{
struct lv5207lp *lv = bl_get_data(backlight);
- int brightness = backlight->props.brightness;
-
- if (backlight->props.power != FB_BLANK_UNBLANK ||
- backlight->props.fb_blank != FB_BLANK_UNBLANK ||
- backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(backlight);
if (brightness) {
lv5207lp_write(lv, LV5207LP_CTRL1,
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 97cc260ff9d1..e607ec6fd4bf 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -64,18 +64,7 @@ out:
static int max8925_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return max8925_backlight_set(bl, brightness);
+ return max8925_backlight_set(bl, backlight_get_brightness(bl));
}
static int max8925_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
deleted file mode 100644
index 23ee7106c72a..000000000000
--- a/drivers/video/backlight/ot200_bl.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Bachmann electronic GmbH
- * Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * Backlight driver for ot200 visualisation device from
- * Bachmann electronic GmbH.
- */
-
-#include <linux/module.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/cs5535.h>
-
-static struct cs5535_mfgpt_timer *pwm_timer;
-
-/* this array defines the mapping of brightness in % to pwm frequency */
-static const u8 dim_table[101] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
- 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9,
- 10, 10, 11, 11, 12, 12, 13, 14, 15, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28,
- 30, 31, 33, 35, 37, 39, 41, 43, 45, 47, 50,
- 53, 55, 58, 61, 65, 68, 72, 75, 79, 84, 88,
- 93, 97, 103, 108, 114, 120, 126, 133, 140,
- 147, 155, 163};
-
-struct ot200_backlight_data {
- int current_brightness;
-};
-
-#define GPIO_DIMM 27
-#define SCALE 1
-#define CMP1MODE 0x2 /* compare on GE; output high on compare
- * greater than or equal */
-#define PWM_SETUP (SCALE | CMP1MODE << 6 | MFGPT_SETUP_CNTEN)
-#define MAX_COMP2 163
-
-static int ot200_backlight_update_status(struct backlight_device *bl)
-{
- struct ot200_backlight_data *data = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
- /* enable or disable PWM timer */
- if (brightness == 0)
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, 0);
- else if (data->current_brightness == 0) {
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN);
- }
-
- /* apply new brightness value */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
- MAX_COMP2 - dim_table[brightness]);
- data->current_brightness = brightness;
-
- return 0;
-}
-
-static int ot200_backlight_get_brightness(struct backlight_device *bl)
-{
- struct ot200_backlight_data *data = bl_get_data(bl);
- return data->current_brightness;
-}
-
-static const struct backlight_ops ot200_backlight_ops = {
- .update_status = ot200_backlight_update_status,
- .get_brightness = ot200_backlight_get_brightness,
-};
-
-static int ot200_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_device *bl;
- struct ot200_backlight_data *data;
- struct backlight_properties props;
- int retval = 0;
-
- /* request gpio */
- if (devm_gpio_request(&pdev->dev, GPIO_DIMM,
- "ot200 backlight dimmer") < 0) {
- dev_err(&pdev->dev, "failed to request GPIO %d\n", GPIO_DIMM);
- return -ENODEV;
- }
-
- /* request timer */
- pwm_timer = cs5535_mfgpt_alloc_timer(7, MFGPT_DOMAIN_ANY);
- if (!pwm_timer) {
- dev_err(&pdev->dev, "MFGPT 7 not available\n");
- return -ENODEV;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- retval = -ENOMEM;
- goto error_devm_kzalloc;
- }
-
- /* setup gpio */
- cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_ENABLE);
- cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_AUX1);
-
- /* setup timer */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP2, MAX_COMP2);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, PWM_SETUP);
-
- data->current_brightness = 100;
- props.max_brightness = 100;
- props.brightness = 100;
- props.type = BACKLIGHT_RAW;
-
- bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
- &pdev->dev, data, &ot200_backlight_ops,
- &props);
- if (IS_ERR(bl)) {
- dev_err(&pdev->dev, "failed to register backlight\n");
- retval = PTR_ERR(bl);
- goto error_devm_kzalloc;
- }
-
- platform_set_drvdata(pdev, bl);
-
- return 0;
-
-error_devm_kzalloc:
- cs5535_mfgpt_free_timer(pwm_timer);
- return retval;
-}
-
-static int ot200_backlight_remove(struct platform_device *pdev)
-{
- /* on module unload set brightness to 100% */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
- MAX_COMP2 - dim_table[100]);
-
- cs5535_mfgpt_free_timer(pwm_timer);
-
- return 0;
-}
-
-static struct platform_driver ot200_backlight_driver = {
- .driver = {
- .name = "ot200-backlight",
- },
- .probe = ot200_backlight_probe,
- .remove = ot200_backlight_remove,
-};
-
-module_platform_driver(ot200_backlight_driver);
-
-MODULE_DESCRIPTION("backlight driver for ot200 visualisation device");
-MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ot200-backlight");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 82b8d7594701..dfc760830eb9 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -108,14 +108,9 @@ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
- int brightness = bl->props.brightness;
+ int brightness = backlight_get_brightness(bl);
struct pwm_state state;
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
if (pb->notify)
brightness = pb->notify(pb->dev, brightness);
@@ -606,7 +601,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->scale = data->max_brightness;
}
- pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
+ pb->lth_brightness = data->lth_brightness * (div_u64(state.period,
+ pb->scale));
props.type = BACKLIGHT_RAW;
props.max_brightness = data->max_brightness;
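The div_u64() change above matters because state.period is a u64: a plain 64-by-32 division does not link on 32-bit targets (the kernel provides no __udivdi3). A minimal sketch of the pattern, with hypothetical names:

#include <linux/math64.h>
#include <linux/types.h>

/* 64-bit dividend, 32-bit divisor: builds on both 32- and 64-bit kernels. */
static u64 scale_period(u64 period_ns, u32 levels)
{
	return div_u64(period_ns, levels);
}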
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 4c8c34b99441..3bc7800eb0a9 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -433,14 +433,9 @@ static int wled5_ovp_delay(struct wled *wled)
static int wled_update_status(struct backlight_device *bl)
{
struct wled *wled = bl_get_data(bl);
- u16 brightness = bl->props.brightness;
+ u16 brightness = backlight_get_brightness(bl);
int rc = 0;
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
mutex_lock(&wled->lock);
if (brightness) {
rc = wled->wled_set_brightness(wled, brightness);
@@ -1287,14 +1282,6 @@ static const struct wled_var_cfg wled4_string_i_limit_cfg = {
.size = ARRAY_SIZE(wled4_string_i_limit_values),
};
-static const struct wled_var_cfg wled3_string_cfg = {
- .size = 8,
-};
-
-static const struct wled_var_cfg wled4_string_cfg = {
- .size = 16,
-};
-
static const struct wled_var_cfg wled5_mod_sel_cfg = {
.size = 2,
};
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 2355f00f5773..0ce181585008 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -8,15 +8,13 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/slab.h>
/* registers */
@@ -42,6 +40,29 @@
#define SKY81452_DEFAULT_NAME "lcd-backlight"
#define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1)
+/**
+ * struct sky81452_bl_platform_data - sky81452 backlight platform data
+ * @name: backlight driver name.
+ * If it is not defined, the default name is lcd-backlight.
+ * @gpiod_enable: GPIO descriptor which controls the EN pin
+ * @enable: enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode.
+ * @phase_shift: true for phase shift mode.
+ * @short_detection_threshold: short detection threshold; one of 4, 5, 6 or 7 V.
+ * @boost_current_limit: boost current limit; either 2300 or 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ struct gpio_desc *gpiod_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
#define CTZ(b) __builtin_ctz(b)
static int sky81452_bl_update_status(struct backlight_device *bd)
@@ -182,7 +203,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm");
pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode");
pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift");
- pdata->gpio_enable = of_get_gpio(np, 0);
+ pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
ret = of_property_count_u32_elems(np, "led-sources");
if (ret < 0) {
@@ -252,26 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regmap *regmap = dev_get_drvdata(dev->parent);
- struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev);
+ struct sky81452_bl_platform_data *pdata;
struct backlight_device *bd;
struct backlight_properties props;
const char *name;
int ret;
- if (!pdata) {
- pdata = sky81452_bl_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
-
- if (gpio_is_valid(pdata->gpio_enable)) {
- ret = devm_gpio_request_one(dev, pdata->gpio_enable,
- GPIOF_OUT_INIT_HIGH, "sky81452-en");
- if (ret < 0) {
- dev_err(dev, "failed to request GPIO. err=%d\n", ret);
- return ret;
- }
- }
+ pdata = sky81452_bl_parse_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
ret = sky81452_bl_init_device(regmap, pdata);
if (ret < 0) {
@@ -312,8 +322,8 @@ static int sky81452_bl_remove(struct platform_device *pdev)
bd->props.brightness = 0;
backlight_update_status(bd);
- if (gpio_is_valid(pdata->gpio_enable))
- gpio_set_value_cansleep(pdata->gpio_enable, 0);
+ if (pdata->gpiod_enable)
+ gpiod_set_value_cansleep(pdata->gpiod_enable, 0);
return 0;
}
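
The sky81452 changes above swap the legacy integer GPIO calls (of_get_gpio(), devm_gpio_request_one(), gpio_set_value_cansleep()) for GPIO descriptors. A minimal consumer-side sketch of that pattern, assuming the same optional enable line as in the hunks (the error handling shown here is illustrative, not part of the patch):

	struct gpio_desc *en;

	/* Request the optional EN descriptor, already driven high. */
	en = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(en))
		return PTR_ERR(en);

	/* On teardown, drive EN low only if the GPIO actually exists. */
	if (en)
		gpiod_set_value_cansleep(en, 0);
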
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 762e3feed097..8457166f357f 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -77,15 +77,7 @@ static int tps65217_bl_update_status(struct backlight_device *bl)
{
struct tps65217_bl *tps65217_bl = bl_get_data(bl);
int rc;
- int brightness = bl->props.brightness;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- if ((bl->props.power != FB_BLANK_UNBLANK) ||
- (bl->props.fb_blank != FB_BLANK_UNBLANK))
- /* framebuffer in low power mode or blanking active */
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
if (brightness > 0) {
rc = tps65217_reg_write(tps65217_bl->tps,
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index e55977d54c15..c5aaee205bdf 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -91,18 +91,7 @@ err:
static int wm831x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return wm831x_backlight_set(bl, brightness);
+ return wm831x_backlight_set(bl, backlight_get_brightness(bl));
}
static int wm831x_backlight_get_brightness(struct backlight_device *bl)
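
The pwm_bl, qcom-wled, tps65217 and wm831x hunks above all drop the same open-coded blanking logic in favour of backlight_get_brightness(). As a rough sketch (the real helper is declared in include/linux/backlight.h; the body below only approximates what the removed lines used to do):

	static inline int blanked_brightness(struct backlight_device *bd)
	{
		/* Treat any powered-down, blanked or suspended state as "off". */
		if (bd->props.power != FB_BLANK_UNBLANK ||
		    bd->props.fb_blank != FB_BLANK_UNBLANK ||
		    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
			return 0;

		return bd->props.brightness;
	}
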
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 09a9ad901dad..bcc92aecf666 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -857,7 +857,7 @@ static void acornfb_parse_dram(char *opt)
case 'M':
case 'm':
size *= 1024;
- /* Fall through */
+ fallthrough;
case 'K':
case 'k':
size *= 1024;
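
Most of the fbdev hunks that follow are the same mechanical conversion: the /* fall through */ comments become the fallthrough pseudo-keyword, which maps to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify the intent. Sketched on the size-suffix parsing just above:

	switch (suffix) {
	case 'M':
	case 'm':
		size *= 1024;
		fallthrough;	/* megabytes: multiply by 1024 twice */
	case 'K':
	case 'k':
		size *= 1024;
		break;
	}
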
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 6f7838979f0a..ae3d8e8b8d33 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -419,7 +419,7 @@ static int arcfb_ioctl(struct fb_info *info,
schedule();
finish_wait(&arcfb_waitq, &wait);
}
- /* fall through */
+ fallthrough;
case FBIO_GETCONTROL2:
{
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 1e252192569a..bfd2f00b403b 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -508,7 +508,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
case 32:
var->transp.offset = 24;
var->transp.length = 8;
- /* fall through */
+ fallthrough;
case 24:
if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:888 mode */
@@ -633,7 +633,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
case 2: value |= ATMEL_LCDC_PIXELSIZE_2; break;
case 4: value |= ATMEL_LCDC_PIXELSIZE_4; break;
case 8: value |= ATMEL_LCDC_PIXELSIZE_8; break;
- case 15: /* fall through */
+ case 15:
case 16: value |= ATMEL_LCDC_PIXELSIZE_16; break;
case 24: value |= ATMEL_LCDC_PIXELSIZE_24; break;
case 32: value |= ATMEL_LCDC_PIXELSIZE_32; break;
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 7c4483c7f313..f3d8123d7f36 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -1208,11 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (mc & 0x4)
break;
- /* fall through */
+ fallthrough;
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
- /* fall through */
+ fallthrough;
case 0:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
@@ -1221,7 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (!(mc & 0x4))
break;
- /* fall through */
+ fallthrough;
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 3df64a973194..15a9ee7cd734 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1476,11 +1476,11 @@ static void init_vgachip(struct fb_info *info)
mdelay(100);
/* mode */
vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
- /* fall through */
+ fallthrough;
case BT_GD5480:
/* from Klaus' NetBSD driver: */
vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
- /* fall through */
+ fallthrough;
case BT_ALPINE:
/* put blitter into 542x compat */
vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9c4f1be856ec..2df56bd303d2 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -49,6 +49,8 @@
#include <linux/cuda.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
+#endif
+#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
@@ -713,7 +715,7 @@ static int controlfb_blank(int blank_mode, struct fb_info *info)
break;
case FB_BLANK_POWERDOWN:
ctrl &= ~0x33;
- /* fall through */
+ fallthrough;
case FB_BLANK_NORMAL:
ctrl |= 0x400;
break;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8a31fc2b2258..66167830fefd 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2191,6 +2191,9 @@ static void updatescrollmode(struct fbcon_display *p,
}
}
+#define PITCH(w) (((w) + 7) >> 3)
+#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
+
static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
@@ -2200,6 +2203,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
+ if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
+ int size;
+ int pitch = PITCH(vc->vc_font.width);
+
+ /*
+		 * If this is a user font, ensure that a possible change to its
+		 * height or width cannot lead to an out-of-bounds access of the
+		 * font data. NOTE: the original charcount must be used in the
+		 * calculation, as the font charcount can change and cannot be
+		 * used to determine the allocated font data size.
+ */
+ if (pitch <= 0)
+ return -EINVAL;
+ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
+ if (size > FNTSIZE(vc->vc_font.data))
+ return -EINVAL;
+ }
+
virt_w = FBCON_SWAP(ops->rotate, width, height);
virt_h = FBCON_SWAP(ops->rotate, height, width);
virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
@@ -2652,7 +2673,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
int size;
int i, csum;
u8 *new_data, *data = font->data;
- int pitch = (font->width+7) >> 3;
+ int pitch = PITCH(font->width);
/* Is there a reason why fbconsole couldn't handle any charcount >256?
* If not this check should be changed to charcount < 256 */
@@ -2668,7 +2689,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
if (fbcon_invalid_charcount(info, charcount))
return -EINVAL;
- size = h * pitch * charcount;
+ size = CALC_FONTSZ(h, pitch, charcount);
new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
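
The new fbcon_resize() guard is easiest to check with concrete numbers. A small userspace sketch of the arithmetic, using a hypothetical 8x16 user font with 256 glyphs (PITCH and CALC_FONTSZ are the macros added above):

	#include <stdio.h>

	#define PITCH(w)             (((w) + 7) >> 3)
	#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c))

	int main(void)
	{
		int allocated = CALC_FONTSZ(16, PITCH(8), 256);	/* 4096 bytes */
		int needed    = CALC_FONTSZ(32, PITCH(8), 256);	/* 8192 bytes */

		/* fbcon_resize() now refuses such a resize with -EINVAL
		 * instead of reading past the 4096-byte font buffer. */
		printf("needed %d > allocated %d\n", needed, allocated);
		return 0;
	}
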
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 30e73ec4ad5c..6815bfb7f572 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
- int flags = info->flags;
int ret = 0;
u32 activate;
struct fb_var_screeninfo old_var;
@@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
event.data = &mode;
fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
- if (flags & FBINFO_MISC_USEREVENT)
- fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
-
return 0;
}
EXPORT_SYMBOL(fb_set_var);
@@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
return -EFAULT;
console_lock();
lock_fb_info(info);
- info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
- info->flags &= ~FBINFO_MISC_USEREVENT;
+ if (!ret)
+ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
unlock_fb_info(info);
console_unlock();
if (!ret && copy_to_user(argp, &var, sizeof(var)))
@@ -1310,7 +1306,7 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
case FBIOGET_CON2FBMAP:
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
- /* fall through */
+ fallthrough;
case FBIOBLANK:
ret = do_fb_ioctl(info, cmd, arg);
break;
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index d54c88f88991..65dae05fff8e 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
var->activate |= FB_ACTIVATE_FORCE;
console_lock();
- fb_info->flags |= FBINFO_MISC_USEREVENT;
err = fb_set_var(fb_info, var);
- fb_info->flags &= ~FBINFO_MISC_USEREVENT;
+ if (!err)
+ fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
console_unlock();
if (err)
return err;
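
Both hunks above (and the ps3fb one further down) follow the same new calling convention: fb_set_var() no longer consults FBINFO_MISC_USEREVENT, so each caller propagates a successful mode change to the console layer itself. The resulting ioctl-path pattern looks like this (the sysfs path does the same around its own locking):

	console_lock();
	lock_fb_info(info);
	ret = fb_set_var(info, &var);
	if (!ret)
		fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
	unlock_fb_info(info);
	console_unlock();
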
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 65491ae74808..e57c00824965 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (efi_enabled(EFI_BOOT) &&
+ if (efi_enabled(EFI_MEMMAP) &&
!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
if ((efifb_fix.smem_start + efifb_fix.smem_len) >
(md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 67ebfe5c9f1d..a547c21c7e92 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1287,7 +1287,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
dev_warn(info->dev,
"MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_SET_PIXFMT_OLD);
- /* fall through */
+ fallthrough;
case MFB_SET_PIXFMT:
if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
return -EFAULT;
@@ -1297,7 +1297,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
dev_warn(info->dev,
"MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_GET_PIXFMT_OLD);
- /* fall through */
+ fallthrough;
case MFB_GET_PIXFMT:
pix_fmt = ad->pix_fmt;
if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 13ded3a10708..e5475ae1e158 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -534,7 +534,7 @@ static int gxt4500_setcolreg(unsigned int reg, unsigned int red,
break;
case DFA_PIX_32BIT:
val |= (reg << 24);
- /* fall through */
+ fallthrough;
case DFA_PIX_24BIT:
val |= (reg << 16) | (reg << 8);
break;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index e4c3c8b65da4..02411d89cb46 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -648,13 +648,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case VERSION_WS2008:
case VERSION_WIN7:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index c65ec7386e87..e6f35f8feefc 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -430,7 +430,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
break;
case 9 ... 15:
bpp = 15;
- /* fall through */
+ fallthrough;
case 16:
if ((1000000 / var->pixclock) > DACSPEED16) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 15/16bpp)\n",
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 01c75c031cb6..39ebbe026ddf 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -90,8 +90,6 @@ static int var_to_pixfmt(struct fb_var_screeninfo *var)
else
return PIXFMT_BGR888UNPACK;
}
-
- /* fall through */
}
return -EINVAL;
diff --git a/drivers/video/fbdev/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c
index 8335da4ca30e..9b0a324bb1b4 100644
--- a/drivers/video/fbdev/nvidia/nv_hw.c
+++ b/drivers/video/fbdev/nvidia/nv_hw.c
@@ -896,7 +896,7 @@ void NVCalcStateExt(struct nvidia_par *par,
if (!par->FlatPanel)
state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
0xeffffeff;
- /* fallthrough */
+ fallthrough;
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 5cd0f5f6a4ae..4501e848a36f 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -141,7 +141,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
- /* fall through */
+ fallthrough;
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, regno);
@@ -211,7 +211,7 @@ static int offb_blank(int blank, struct fb_info *info)
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
- /* fall through */
+ fallthrough;
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, i);
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index fa73acfc1371..7317c9aad677 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -328,13 +328,13 @@ static int omap_lcdc_setup_plane(int plane, int channel_out,
lcdc.bpp = 12;
break;
}
- /* fallthrough */
+ fallthrough;
case OMAPFB_COLOR_YUV422:
if (lcdc.ext_mode) {
lcdc.bpp = 16;
break;
}
- /* fallthrough */
+ fallthrough;
default:
/* FIXME: other BPPs.
* bpp1: code 0, size 256
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 0cbcc74fa943..3d090d2d9ed9 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -253,7 +253,7 @@ static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
if (fbdev->ctrl->setcolreg)
r = fbdev->ctrl->setcolreg(regno, red, green, blue,
transp, update_hw_pal);
- /* Fallthrough */
+ fallthrough;
case OMAPFB_COLOR_RGB565:
case OMAPFB_COLOR_RGB444:
if (r != 0)
@@ -443,7 +443,7 @@ static int set_color_mode(struct omapfb_plane_struct *plane,
return 0;
case 12:
var->bits_per_pixel = 16;
- /* fall through */
+ fallthrough;
case 16:
if (plane->fbdev->panel->bpp == 12)
plane->color_mode = OMAPFB_COLOR_RGB444;
@@ -1531,27 +1531,27 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
case OMAPFB_ACTIVE:
for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
unregister_framebuffer(fbdev->fb_info[i]);
- /* fall through */
+ fallthrough;
case 7:
omapfb_unregister_sysfs(fbdev);
- /* fall through */
+ fallthrough;
case 6:
if (fbdev->panel->disable)
fbdev->panel->disable(fbdev->panel);
- /* fall through */
+ fallthrough;
case 5:
omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
- /* fall through */
+ fallthrough;
case 4:
planes_cleanup(fbdev);
- /* fall through */
+ fallthrough;
case 3:
ctrl_cleanup(fbdev);
- /* fall through */
+ fallthrough;
case 2:
if (fbdev->panel->cleanup)
fbdev->panel->cleanup(fbdev->panel);
- /* fall through */
+ fallthrough;
case 1:
dev_set_drvdata(fbdev->dev, NULL);
kfree(fbdev);
@@ -1854,7 +1854,7 @@ static int __init omapfb_setup(char *options)
case 'm':
case 'M':
vram *= 1024;
- /* Fall through */
+ fallthrough;
case 'k':
case 'K':
vram *= 1024;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 3920a0db0390..b2d6e6df2161 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -1861,7 +1861,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
- /* fall through */
+ fallthrough;
case OMAP_DSS_ROT_90:
case OMAP_DSS_ROT_270:
*offset1 = 0;
@@ -1884,7 +1884,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
- /* fall through */
+ fallthrough;
case OMAP_DSS_ROT_90 + 4:
case OMAP_DSS_ROT_270 + 4:
*offset1 = 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index f40be68d5aac..ea8c88aa4477 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -760,7 +760,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
r = -ENODEV;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 836e7b1639ce..a3decc7fadde 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -882,7 +882,7 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
/ (var->bits_per_pixel >> 2);
break;
}
- /* fall through */
+ fallthrough;
default:
screen_width = fix->line_length / (var->bits_per_pixel >> 3);
break;
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index c7c98d8e2359..0642555289e0 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -233,10 +233,10 @@ static u32 to3264(u32 timing, int bpp, int is64)
switch (bpp) {
case 24:
timing *= 3;
- /* fall through */
+ fallthrough;
case 8:
timing >>= 1;
- /* fall through */
+ fallthrough;
case 16:
timing >>= 1;
case 32:
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 9df78fb77267..203c254f8f6c 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -29,6 +29,7 @@
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/fb.h>
+#include <linux/fbcon.h>
#include <linux/init.h>
#include <asm/cell-regs.h>
@@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
var = info->var;
fb_videomode_to_var(&var, vmode);
console_lock();
- info->flags |= FBINFO_MISC_USEREVENT;
/* Force, in case only special bits changed */
var.activate |= FB_ACTIVATE_FORCE;
par->new_mode_id = val;
retval = fb_set_var(info, &var);
- info->flags &= ~FBINFO_MISC_USEREVENT;
+ if (!retval)
+ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
console_unlock();
}
break;
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index eedfbd3572a8..47e6a1d0d229 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -60,8 +60,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
else
return PIX_FMT_BGR1555;
}
-
- /* fall through */
}
/*
@@ -87,8 +85,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
else
return PIX_FMT_BGR888UNPACK;
}
-
- /* fall through */
}
return -EINVAL;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index a53d24fb7183..f1551e00eb12 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1614,7 +1614,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
*/
if (old_state != C_DISABLE_PM)
break;
- /* fall through */
+ fallthrough;
case C_ENABLE:
/*
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 9b3493846f4d..ce55b9d2e862 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -1093,7 +1093,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
case 9 ... 15:
var->green.length = 5;
- /* fall through */
+ fallthrough;
case 16:
var->bits_per_pixel = 16;
/* The Riva128 supports RGB555 only */
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 9dc925054930..ba316bd56efd 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -284,7 +284,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/* 666 with one bit alpha/transparency */
var->transp.offset = 18;
var->transp.length = 1;
- /* fall through */
+ fallthrough;
case 18:
var->bits_per_pixel = 32;
@@ -312,7 +312,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
case 25:
var->transp.length = var->bits_per_pixel - 24;
var->transp.offset = 24;
- /* fall through */
+ fallthrough;
case 24:
/* our 24bpp is unpacked, so 32bpp */
var->bits_per_pixel = 32;
@@ -809,7 +809,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
wincon &= ~WINCONx_ENWIN;
sfb->enabled &= ~(1 << index);
- /* fall through - to FB_BLANK_NORMAL */
+ fallthrough; /* to FB_BLANK_NORMAL */
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index bda6cc313c8b..e31cf63b0a62 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -935,7 +935,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
*/
if (old_state != C_DISABLE_PM)
break;
- /* fall through */
+ fallthrough;
case C_ENABLE:
/*
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 3fd87aeb6c79..a2442aae7e12 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1860,7 +1860,7 @@ static int savage_init_hw(struct savagefb_par *par)
if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6))
RamSavage4[1] = 8;
- /*FALLTHROUGH*/
+ fallthrough;
case S3_SAVAGE2000:
videoRam = RamSavage4[(config1 & 0xE0) >> 5] * 1024;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 8a27d12e6ea8..c1043420dbd3 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1594,7 +1594,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
- /* Fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
@@ -2085,7 +2085,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
- /* Fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index ac140962b1bf..03c736f6f3d0 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1739,7 +1739,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_INFO: /* For communication with X driver */
ivideo->sisfb_infoblock.sisfb_id = SISFB_ID;
ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR;
@@ -1793,7 +1793,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_VBRSTATUS:
if(sisfb_CheckVBRetrace(ivideo))
return put_user((u32)1, argp);
@@ -1804,7 +1804,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_AUTOMAXIMIZE:
if(ivideo->sisfb_max)
return put_user((u32)1, argp);
@@ -1815,7 +1815,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_SET_AUTOMAXIMIZE:
if(get_user(gpu32, argp))
return -EFAULT;
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 3dd1b1d76e98..6a52eba64559 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1005,7 +1005,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
- /* fall through */
+ fallthrough;
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 8e06ba912d60..09425ec317ba 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -312,7 +312,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
/* Enable the PWM */
pwm_enable(par->pwm);
- dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n",
+ dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n",
par->pwm->pwm, pwm_get_period(par->pwm));
}
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index de953ddb6312..265865610edc 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -999,7 +999,7 @@ stifb_blank(int blank_mode, struct fb_info *info)
case S9000_ID_HCRX:
HYPER_ENABLE_DISABLE_DISPLAY(fb, enable);
break;
- case S9000_ID_A1659A: /* fall through */
+ case S9000_ID_A1659A:
case S9000_ID_TIMBER:
case CRX24_OVERLAY_PLANES:
default:
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
dev_name);
goto out_err0;
}
- /* fall through */
+ fallthrough;
case S9000_ID_ARTIST:
case S9000_ID_HCRX:
case S9000_ID_TIMBER:
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index f73e26c18c09..f056d80f6359 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -523,7 +523,7 @@ static int tdfxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
case 32:
var->transp.offset = 24;
var->transp.length = 8;
- /* fall through */
+ fallthrough;
case 24:
var->red.offset = 16;
var->green.offset = 8;
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 3fea01db58d6..4a869402d120 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -744,7 +744,7 @@ static void set_lcd_output_path(int set_iga, int output_interface)
viaparinfo->chip_info->gfx_chip_name))
viafb_write_reg_mask(CR97, VIACR, 0x84,
BIT7 + BIT2 + BIT1 + BIT0);
- /* fall through */
+ fallthrough;
case INTERFACE_DVP0:
case INTERFACE_DVP1:
case INTERFACE_DFP_HIGH:
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 00307b8693bf..5ec51445bee8 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -677,7 +677,7 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8be02f333b7a..31cc97f2f515 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -398,12 +398,9 @@ static inline s64 towards_target(struct virtio_balloon *vb)
s64 target;
u32 num_pages;
- virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
- &num_pages);
-
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- num_pages = le32_to_cpu((__force __le32)num_pages);
+ virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
target = num_pages;
return target - vb->num_pages;
@@ -462,11 +459,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
u32 actual = vb->num_pages;
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- actual = (__force u32)cpu_to_le32(actual);
-
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
- &actual);
+ virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
+ &actual);
}
static void update_balloon_stats_func(struct work_struct *work)
@@ -579,12 +573,10 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap)) {
- virtio_cread(vb->vdev, struct virtio_balloon_config,
- free_page_hint_cmd_id,
- &vb->cmd_id_received_cache);
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+ virtio_cread_le(vb->vdev, struct virtio_balloon_config,
+ free_page_hint_cmd_id,
+ &vb->cmd_id_received_cache);
}
return vb->cmd_id_received_cache;
@@ -600,7 +592,7 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
while (virtqueue_get_buf(vq, &unused))
;
- vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+ vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
@@ -987,8 +979,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (!want_init_on_free())
memset(&poison_val, PAGE_POISON, sizeof(poison_val));
- virtio_cwrite(vb->vdev, struct virtio_balloon_config,
- poison_val, &poison_val);
+ virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
}
vb->pr_dev_info.report = virtballoon_free_page_report;
@@ -1129,7 +1121,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
- __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
return 0;
}
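
The virtio config-space accesses above (and in virtio_input and virtio_mem below) all move to the _le accessors. The balloon config space is little-endian even on legacy devices that lack VIRTIO_F_VERSION_1, so the helper now encodes what every call site used to open-code:

	/* Before: native-endian read plus a per-call-site legacy fixup. */
	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
		     &num_pages);
	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
		num_pages = le32_to_cpu((__force __le32)num_pages);

	/* After: the accessor always converts the field from little-endian. */
	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
			&num_pages);
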
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index efaf65b0f42d..877b2ea3ed05 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -113,9 +113,9 @@ static u8 virtinput_cfg_select(struct virtio_input *vi,
{
u8 size;
- virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
- virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
- virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+ virtio_cwrite_le(vi->vdev, struct virtio_input_config, select, &select);
+ virtio_cwrite_le(vi->vdev, struct virtio_input_config, subsel, &subsel);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, size, &size);
return size;
}
@@ -158,11 +158,11 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
u32 mi, ma, re, fu, fl;
virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
input_abs_set_res(vi->idev, abs, re);
}
@@ -244,14 +244,14 @@ static int virtinput_probe(struct virtio_device *vdev)
size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
if (size >= sizeof(struct virtio_input_devids)) {
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.bustype, &vi->idev->id.bustype);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.vendor, &vi->idev->id.vendor);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.product, &vi->idev->id.product);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.version, &vi->idev->id.version);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.bustype, &vi->idev->id.bustype);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.vendor, &vi->idev->id.vendor);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.product, &vi->idev->id.product);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.version, &vi->idev->id.version);
} else {
vi->idev->id.bustype = BUS_VIRTUAL;
}
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index f26f5f64ae82..c08512fcea90 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1530,21 +1530,21 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
- virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
- &new_plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &new_plugged_size);
if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
vm->plugged_size = new_plugged_size;
/* calculate the last usable memory block id */
- virtio_cread(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config,
+ usable_region_size, &usable_region_size);
end_addr = vm->addr + usable_region_size;
end_addr = min(end_addr, phys_limit);
vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
/* see if there is a request to change the size */
- virtio_cread(vm->vdev, struct virtio_mem_config, requested_size,
- &vm->requested_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
+ &vm->requested_size);
dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
@@ -1677,16 +1677,16 @@ static int virtio_mem_init(struct virtio_mem *vm)
}
/* Fetch all properties that can't change. */
- virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
- &vm->plugged_size);
- virtio_cread(vm->vdev, struct virtio_mem_config, block_size,
- &vm->device_block_size);
- virtio_cread(vm->vdev, struct virtio_mem_config, node_id,
- &node_id);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &vm->plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
+ &vm->device_block_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
+ &node_id);
vm->nid = virtio_mem_translate_node_id(vm, node_id);
- virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
- virtio_cread(vm->vdev, struct virtio_mem_config, region_size,
- &vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
+ &vm->region_size);
/*
* We always hotplug memory in memory block granularity. This way,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index db93cedd262f..3e14e700b231 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -27,16 +27,16 @@
* method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
* for 16-bit fields and 8-bit accesses for 8-bit fields.
*/
-static inline u8 vp_ioread8(u8 __iomem *addr)
+static inline u8 vp_ioread8(const u8 __iomem *addr)
{
return ioread8(addr);
}
-static inline u16 vp_ioread16 (__le16 __iomem *addr)
+static inline u16 vp_ioread16 (const __le16 __iomem *addr)
{
return ioread16(addr);
}
-static inline u32 vp_ioread32(__le32 __iomem *addr)
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
{
return ioread32(addr);
}
@@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
* @dev: the pci device
* @cfg_type: the VIRTIO_PCI_CAP_* value we seek
* @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ * @bars: the bitmask of BARs
*
* Returns offset of the capability, or 0.
*/
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a2de775801af..becc77697960 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
static bool vring_use_dma_api(struct virtio_device *vdev)
{
- if (!virtio_has_iommu_quirk(vdev))
+ if (!virtio_has_dma_quirk(vdev))
return true;
/* Otherwise, we are left to guess. */
@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
virtio_mb(vq->weak_barriers);
return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
virtqueue_poll_split(_vq, last_used_idx);
@@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_VERSION_1:
break;
- case VIRTIO_F_IOMMU_PLATFORM:
+ case VIRTIO_F_ACCESS_PLATFORM:
break;
case VIRTIO_F_RING_PACKED:
break;
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c30eb55030be..4a9ddb44b2a7 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- ops->get_config(vdpa, offset, buf, len);
+ vdpa_get_config(vdpa, offset, buf, len);
}
static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
@@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- return ops->set_status(vdpa, 0);
+ vdpa_reset(vdpa);
}
static bool virtio_vdpa_notify(struct virtqueue *vq)
@@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return ops->set_features(vdpa, vdev->features);
+ return vdpa_set_features(vdpa, vdev->features);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4f4687c46d38..ab7aad5a1e69 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1027,7 +1027,7 @@ config ADVANTECH_WDT
If you are configuring a Linux kernel for the Advantech single-board
computer, say `Y' here to support its built-in watchdog timer
feature. More information can be found at
- <http://www.advantech.com.tw/products/>
+ <https://www.advantech.com.tw/products/>
config ALIM1535_WDT
tristate "ALi M1535 PMU Watchdog Timer"
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 0e4c18a2aa42..554fe85da50e 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -177,7 +177,7 @@ static long advwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (advwdt_set_heartbeat(new_timeout))
return -EINVAL;
advwdt_ping();
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 42338c7d4540..bfb9a91ca1df 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -220,7 +220,7 @@ static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
ali_keepalive();
}
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 5af0358f4390..4ff7f5afb7aa 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -279,7 +279,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
timeout = new_timeout;
wdt_keepalive();
}
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index c087027ffd5d..ff37dc91057d 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -235,8 +235,7 @@ static long ar7_wdt_ioctl(struct file *file,
ar7_wdt_update_margin(new_margin);
ar7_wdt_kick(1);
spin_unlock(&wdt_lock);
- /* Fall through */
-
+ fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, (int *)arg))
return -EFAULT;
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index d6dff97c280b..0f18f06a21b6 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -215,8 +215,8 @@ static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
err = ath79_wdt_set_timeout(t);
if (err)
break;
+ fallthrough;
- /* fallthrough */
case WDIOC_GETTIMEOUT:
err = put_user(timeout, p);
break;
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index eb850a8d19df..8237c4e9c2a0 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -279,7 +279,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
- return -ENODEV;
+ return PTR_ERR(wdt->base);
wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
ret = bcm_kona_wdt_set_resolution_reg(wdt);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 9d09bbfdef20..7817fb976f9c 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -39,6 +39,11 @@ static bool booke_wdt_enabled;
module_param(booke_wdt_enabled, bool, 0);
static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
module_param(booke_wdt_period, int, 0);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#ifdef CONFIG_PPC_FSL_BOOK3E
@@ -215,7 +220,6 @@ static void __exit booke_wdt_exit(void)
static int __init booke_wdt_init(void)
{
int ret = 0;
- bool nowayout = WATCHDOG_NOWAYOUT;
pr_info("powerpc book-e watchdog driver loaded\n");
booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index fba21de2bbad..32d0e1781e63 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2010-2011 Picochip Ltd., Jamie Iles
- * http://www.picochip.com
+ * https://www.picochip.com
*
* This file implements a driver for the Synopsys DesignWare watchdog device
* in the many subsystems. The watchdog has 16 different timeout periods
@@ -13,6 +13,8 @@
*/
#include <linux/bitops.h>
+#include <linux/limits.h>
+#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -20,11 +22,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
+#include <linux/debugfs.h>
#define WDOG_CONTROL_REG_OFFSET 0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
@@ -34,26 +38,64 @@
#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
-
-/* The maximum TOP (timeout period) value that can be set in the watchdog. */
-#define DW_WDT_MAX_TOP 15
+#define WDOG_INTERRUPT_STATUS_REG_OFFSET 0x10
+#define WDOG_INTERRUPT_CLEAR_REG_OFFSET 0x14
+#define WDOG_COMP_PARAMS_5_REG_OFFSET 0xe4
+#define WDOG_COMP_PARAMS_4_REG_OFFSET 0xe8
+#define WDOG_COMP_PARAMS_3_REG_OFFSET 0xec
+#define WDOG_COMP_PARAMS_2_REG_OFFSET 0xf0
+#define WDOG_COMP_PARAMS_1_REG_OFFSET 0xf4
+#define WDOG_COMP_PARAMS_1_USE_FIX_TOP BIT(6)
+#define WDOG_COMP_VERSION_REG_OFFSET 0xf8
+#define WDOG_COMP_TYPE_REG_OFFSET 0xfc
+
+/* There are sixteen TOPs (timeout periods) that can be set in the watchdog. */
+#define DW_WDT_NUM_TOPS 16
+#define DW_WDT_FIX_TOP(_idx) (1U << (16 + _idx))
#define DW_WDT_DEFAULT_SECONDS 30
+static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = {
+ DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2),
+ DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5),
+ DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8),
+ DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11),
+ DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14),
+ DW_WDT_FIX_TOP(15)
+};
+
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+enum dw_wdt_rmod {
+ DW_WDT_RMOD_RESET = 1,
+ DW_WDT_RMOD_IRQ = 2
+};
+
+struct dw_wdt_timeout {
+ u32 top_val;
+ unsigned int sec;
+ unsigned int msec;
+};
+
struct dw_wdt {
void __iomem *regs;
struct clk *clk;
+ struct clk *pclk;
unsigned long rate;
+ enum dw_wdt_rmod rmod;
+ struct dw_wdt_timeout timeouts[DW_WDT_NUM_TOPS];
struct watchdog_device wdd;
struct reset_control *rst;
/* Save/restore */
u32 control;
u32 timeout;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+#endif
};
#define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
@@ -64,20 +106,84 @@ static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
WDOG_CONTROL_REG_WDT_EN_MASK;
}
-static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top)
+static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
{
+ u32 val;
+
+ val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ if (rmod == DW_WDT_RMOD_IRQ)
+ val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+ else
+ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+ dw_wdt->rmod = rmod;
+}
+
+static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
+ unsigned int timeout, u32 *top_val)
+{
+ int idx;
+
/*
- * There are 16 possible timeout values in 0..15 where the number of
- * cycles is 2 ^ (16 + i) and the watchdog counts down.
+	 * Find a TOP with a timeout greater than or equal to the requested
+	 * number. Note we'll select the TOP with the maximum timeout if the
+	 * requested timeout cannot be reached.
*/
- return (1U << (16 + top)) / dw_wdt->rate;
+ for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+ if (dw_wdt->timeouts[idx].sec >= timeout)
+ break;
+ }
+
+ if (idx == DW_WDT_NUM_TOPS)
+ --idx;
+
+ *top_val = dw_wdt->timeouts[idx].top_val;
+
+ return dw_wdt->timeouts[idx].sec;
+}
+
+static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
+{
+ int idx;
+
+ /*
+	 * We'll find a timeout greater than or equal to one second anyway because
+ * the driver probe would have failed if there was none.
+ */
+ for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+ if (dw_wdt->timeouts[idx].sec)
+ break;
+ }
+
+ return dw_wdt->timeouts[idx].sec;
}
-static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
+static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
{
- int top = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+ struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
+ u64 msec;
+
+ msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec;
- return dw_wdt_top_in_seconds(dw_wdt, top);
+ return msec < UINT_MAX ? msec : UINT_MAX;
+}
+
+static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
+{
+ int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+ int idx;
+
+ for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+ if (dw_wdt->timeouts[idx].top_val == top_val)
+ break;
+ }
+
+ /*
+	 * In IRQ mode, due to the two-stage counter, the actual timeout is
+	 * twice the TOP setting.
+ */
+ return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
}
static int dw_wdt_ping(struct watchdog_device *wdd)
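
With the fixed TOPs of 2^(16 + i) clock cycles, the selection done by dw_wdt_find_best_top() can be traced by hand. A small userspace sketch, assuming a hypothetical 32768 Hz watchdog clock and a requested 10-second timeout:

	#include <stdio.h>

	int main(void)
	{
		unsigned long rate = 32768;	/* assumed clock rate, Hz */
		unsigned int want = 10;		/* requested timeout, seconds */
		unsigned int idx;

		for (idx = 0; idx < 16; idx++) {
			unsigned int sec = (1U << (16 + idx)) / rate;

			if (sec >= want) {
				/* Prints "TOP 3 -> 16 s": the smallest TOP
				 * covering the requested timeout. */
				printf("TOP %u -> %u s\n", idx, sec);
				break;
			}
		}
		return 0;
	}
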
@@ -93,17 +199,23 @@ static int dw_wdt_ping(struct watchdog_device *wdd)
static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
- int i, top_val = DW_WDT_MAX_TOP;
+ unsigned int timeout;
+ u32 top_val;
/*
- * Iterate over the timeout values until we find the closest match. We
- * always look for >=.
+	 * Note that IRQ mode being enabled means having a non-zero pre-timeout
+	 * set up. In this case we try to find a TOP as close to half of the
+	 * requested timeout as possible, since the DW Watchdog IRQ mode works
+	 * in two stages: the first timeout raises the pre-timeout interrupt,
+	 * the second timeout performs the system reset. So the effective
+	 * watchdog-caused reset happens after two watchdog TOPs have elapsed.
*/
- for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
- if (dw_wdt_top_in_seconds(dw_wdt, i) >= top_s) {
- top_val = i;
- break;
- }
+ timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
+ &top_val);
+ if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+ wdd->pretimeout = timeout;
+ else
+ wdd->pretimeout = 0;
/*
* Set the new value in the watchdog. Some versions of dw_wdt
@@ -114,25 +226,47 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ /* Kick new TOP value into the watchdog counter if activated. */
+ if (watchdog_active(wdd))
+ dw_wdt_ping(wdd);
+
/*
* In case users set bigger timeout value than HW can support,
* kernel(watchdog_dev.c) helps to feed watchdog before
* wdd->max_hw_heartbeat_ms
*/
if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
- wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+ wdd->timeout = timeout * dw_wdt->rmod;
else
wdd->timeout = top_s;
return 0;
}
+static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
+{
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+
+ /*
+	 * We ignore the actual value of the timeout passed from user space and
+	 * use it only as a flag indicating whether the pretimeout functionality
+	 * is intended to be activated.
+ */
+ dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
+ dw_wdt_set_timeout(wdd, wdd->timeout);
+
+ return 0;
+}
+
static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
{
u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
- /* Disable interrupt mode; always perform system reset. */
- val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+ /* Disable/enable interrupt mode depending on the RMOD flag. */
+ if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+ val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+ else
+ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
/* Enable watchdog. */
val |= WDOG_CONTROL_REG_WDT_EN_MASK;
writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
@@ -170,6 +304,7 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
if (dw_wdt_is_enabled(dw_wdt))
writel(WDOG_COUNTER_RESTART_KICK_VALUE,
dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
@@ -185,9 +320,19 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+ unsigned int sec;
+ u32 val;
+
+ val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
+ sec = val / dw_wdt->rate;
+
+ if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
+ val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+ if (!val)
+ sec += wdd->pretimeout;
+ }
- return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
- dw_wdt->rate;
+ return sec;
}
static const struct watchdog_info dw_wdt_ident = {
@@ -196,16 +341,41 @@ static const struct watchdog_info dw_wdt_ident = {
.identity = "Synopsys DesignWare Watchdog",
};
+static const struct watchdog_info dw_wdt_pt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE,
+ .identity = "Synopsys DesignWare Watchdog",
+};
+
static const struct watchdog_ops dw_wdt_ops = {
.owner = THIS_MODULE,
.start = dw_wdt_start,
.stop = dw_wdt_stop,
.ping = dw_wdt_ping,
.set_timeout = dw_wdt_set_timeout,
+ .set_pretimeout = dw_wdt_set_pretimeout,
.get_timeleft = dw_wdt_get_timeleft,
.restart = dw_wdt_restart,
};
+static irqreturn_t dw_wdt_irq(int irq, void *devid)
+{
+ struct dw_wdt *dw_wdt = devid;
+ u32 val;
+
+ /*
+	 * We don't clear the IRQ status here; that is supposed to be done by
+	 * a subsequent ping operation.
+ */
+ val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+ if (!val)
+ return IRQ_NONE;
+
+ watchdog_notify_pretimeout(&dw_wdt->wdd);
+
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
@@ -214,6 +384,7 @@ static int dw_wdt_suspend(struct device *dev)
dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ clk_disable_unprepare(dw_wdt->pclk);
clk_disable_unprepare(dw_wdt->clk);
return 0;
@@ -227,6 +398,12 @@ static int dw_wdt_resume(struct device *dev)
if (err)
return err;
+ err = clk_prepare_enable(dw_wdt->pclk);
+ if (err) {
+ clk_disable_unprepare(dw_wdt->clk);
+ return err;
+ }
+
writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
@@ -238,6 +415,139 @@ static int dw_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
+/*
+ * If the DW WDT IP core is synthesized with the fixed TOP feature disabled,
+ * the TOPs array may be arbitrarily ordered and hold nearly any sixteen
+ * unsigned values, depending on the system engineer's imagination. The next
+ * method handles the passed TOPs array to pre-calculate the effective
+ * timeouts and to sort the TOP items in ascending order of timeout.
+ */
+
+static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
+{
+ struct dw_wdt_timeout tout, *dst;
+ int val, tidx;
+ u64 msec;
+
+ /*
+ * We walk over the passed TOPs array and calculate corresponding
+ * timeouts in seconds and milliseconds. The milliseconds granularity
+ * is needed to distinguish the TOPs with very close timeouts and to
+ * set the watchdog max heartbeat setting further.
+ */
+ for (val = 0; val < DW_WDT_NUM_TOPS; ++val) {
+ tout.top_val = val;
+ tout.sec = tops[val] / dw_wdt->rate;
+ msec = (u64)tops[val] * MSEC_PER_SEC;
+ do_div(msec, dw_wdt->rate);
+ tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC);
+
+ /*
+ * Find a suitable place for the current TOP in the timeouts
+		 * array so that the list remains in ascending order.
+ */
+ for (tidx = 0; tidx < val; ++tidx) {
+ dst = &dw_wdt->timeouts[tidx];
+ if (tout.sec > dst->sec || (tout.sec == dst->sec &&
+ tout.msec >= dst->msec))
+ continue;
+ else
+ swap(*dst, tout);
+ }
+
+ dw_wdt->timeouts[val] = tout;
+ }
+}
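The loop above is an insertion sort keyed on (sec, msec). The stand-alone user-space sketch below reproduces the same bookkeeping with hypothetical TOP counter values and a hypothetical 32768 Hz clock (none of these numbers come from the patch), then derives the minimum and maximum heartbeat the way the probe path later does from timeouts[0] and timeouts[DW_WDT_NUM_TOPS - 1].

/* Illustrative user-space sketch, not part of the patch. */
#include <stdio.h>

#define NUM_TOPS 16

struct timeout { unsigned int top_val, sec, msec; };

int main(void)
{
    /* Hypothetical growing TOP counter values and timer clock rate. */
    unsigned int tops[NUM_TOPS], rate = 32768;
    struct timeout touts[NUM_TOPS];

    for (int i = 0; i < NUM_TOPS; i++)
        tops[i] = 0xffff << i;

    for (int val = 0; val < NUM_TOPS; ++val) {
        unsigned long long ms = (unsigned long long)tops[val] * 1000 / rate;
        struct timeout tout = {
            .top_val = val,
            .sec = tops[val] / rate,
            .msec = (unsigned int)(ms % 1000),
        };

        /* Insertion sort: bubble the new entry into its sorted slot. */
        for (int tidx = 0; tidx < val; ++tidx) {
            struct timeout *dst = &touts[tidx];

            if (tout.sec > dst->sec ||
                (tout.sec == dst->sec && tout.msec >= dst->msec))
                continue;
            struct timeout tmp = *dst; *dst = tout; tout = tmp;
        }
        touts[val] = tout;
    }

    printf("min timeout %u s, max heartbeat %u ms\n",
           touts[0].sec ? touts[0].sec : 1,
           touts[NUM_TOPS - 1].sec * 1000 + touts[NUM_TOPS - 1].msec);
    return 0;
}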
+
+static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
+{
+ u32 data, of_tops[DW_WDT_NUM_TOPS];
+ const u32 *tops;
+ int ret;
+
+ /*
+ * Retrieve custom or fixed counter values depending on the
+ * WDT_USE_FIX_TOP flag found in the component specific parameters
+ * #1 register.
+ */
+ data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
+ if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) {
+ tops = dw_wdt_fix_tops;
+ } else {
+ ret = of_property_read_variable_u32_array(dev_of_node(dev),
+ "snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS,
+ DW_WDT_NUM_TOPS);
+ if (ret < 0) {
+ dev_warn(dev, "No valid TOPs array specified\n");
+ tops = dw_wdt_fix_tops;
+ } else {
+ tops = of_tops;
+ }
+ }
+
+ /* Convert the specified TOPs into an array of watchdog timeouts. */
+ dw_wdt_handle_tops(dw_wdt, tops);
+ if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
+		dev_err(dev, "No valid TOP detected\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DW_WDT_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off \
+}
+
+static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = {
+ DW_WDT_DBGFS_REG("cr", WDOG_CONTROL_REG_OFFSET),
+ DW_WDT_DBGFS_REG("torr", WDOG_TIMEOUT_RANGE_REG_OFFSET),
+ DW_WDT_DBGFS_REG("ccvr", WDOG_CURRENT_COUNT_REG_OFFSET),
+ DW_WDT_DBGFS_REG("crr", WDOG_COUNTER_RESTART_REG_OFFSET),
+ DW_WDT_DBGFS_REG("stat", WDOG_INTERRUPT_STATUS_REG_OFFSET),
+ DW_WDT_DBGFS_REG("param5", WDOG_COMP_PARAMS_5_REG_OFFSET),
+ DW_WDT_DBGFS_REG("param4", WDOG_COMP_PARAMS_4_REG_OFFSET),
+ DW_WDT_DBGFS_REG("param3", WDOG_COMP_PARAMS_3_REG_OFFSET),
+ DW_WDT_DBGFS_REG("param2", WDOG_COMP_PARAMS_2_REG_OFFSET),
+ DW_WDT_DBGFS_REG("param1", WDOG_COMP_PARAMS_1_REG_OFFSET),
+ DW_WDT_DBGFS_REG("version", WDOG_COMP_VERSION_REG_OFFSET),
+ DW_WDT_DBGFS_REG("type", WDOG_COMP_TYPE_REG_OFFSET)
+};
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
+{
+ struct device *dev = dw_wdt->wdd.parent;
+ struct debugfs_regset32 *regset;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ regset->regs = dw_wdt_dbgfs_regs;
+ regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs);
+ regset->base = dw_wdt->regs;
+
+ dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+ debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
+}
+
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
+{
+ debugfs_remove_recursive(dw_wdt->dbgfs_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}
+
+#endif /* !CONFIG_DEBUG_FS */
+
static int dw_wdt_drv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -253,9 +563,18 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
if (IS_ERR(dw_wdt->regs))
return PTR_ERR(dw_wdt->regs);
- dw_wdt->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(dw_wdt->clk))
- return PTR_ERR(dw_wdt->clk);
+ /*
+ * Try to request the watchdog dedicated timer clock source. It must
+	 * be supplied if asynchronous mode is enabled. Otherwise fall back
+	 * to the common timer/bus clock configuration, in which the first
+	 * clock found supplies both the timer and the APB signals.
+ */
+ dw_wdt->clk = devm_clk_get(dev, "tclk");
+ if (IS_ERR(dw_wdt->clk)) {
+ dw_wdt->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dw_wdt->clk))
+ return PTR_ERR(dw_wdt->clk);
+ }
ret = clk_prepare_enable(dw_wdt->clk);
if (ret)
@@ -267,20 +586,64 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
goto out_disable_clk;
}
+ /*
+ * Request APB clock if device is configured with async clocks mode.
+ * In this case both tclk and pclk clocks are supposed to be specified.
+ * Alas we can't know for sure whether async mode was really activated,
+ * so the pclk phandle reference is left optional. If it couldn't be
+ * found we consider the device configured in synchronous clocks mode.
+ */
+ dw_wdt->pclk = devm_clk_get_optional(dev, "pclk");
+ if (IS_ERR(dw_wdt->pclk)) {
+ ret = PTR_ERR(dw_wdt->pclk);
+ goto out_disable_clk;
+ }
+
+ ret = clk_prepare_enable(dw_wdt->pclk);
+ if (ret)
+ goto out_disable_clk;
+
dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(dw_wdt->rst)) {
ret = PTR_ERR(dw_wdt->rst);
- goto out_disable_clk;
+ goto out_disable_pclk;
+ }
+
+ /* Enable normal reset without pre-timeout by default. */
+ dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
+
+ /*
+ * Pre-timeout IRQ is optional, since some hardware may lack support
+	 * for it. Note we must request a rising-edge IRQ, since the line is
+	 * left pending either until the next watchdog kick event or until
+	 * the system reset.
+ */
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret > 0) {
+ ret = devm_request_irq(dev, ret, dw_wdt_irq,
+ IRQF_SHARED | IRQF_TRIGGER_RISING,
+ pdev->name, dw_wdt);
+ if (ret)
+ goto out_disable_pclk;
+
+ dw_wdt->wdd.info = &dw_wdt_pt_ident;
+ } else {
+ if (ret == -EPROBE_DEFER)
+ goto out_disable_pclk;
+
+ dw_wdt->wdd.info = &dw_wdt_ident;
}
reset_control_deassert(dw_wdt->rst);
+ ret = dw_wdt_init_timeouts(dw_wdt, dev);
+ if (ret)
+ goto out_disable_clk;
+
wdd = &dw_wdt->wdd;
- wdd->info = &dw_wdt_ident;
wdd->ops = &dw_wdt_ops;
- wdd->min_timeout = 1;
- wdd->max_hw_heartbeat_ms =
- dw_wdt_top_in_seconds(dw_wdt, DW_WDT_MAX_TOP) * 1000;
+ wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
+ wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
wdd->parent = dev;
watchdog_set_drvdata(wdd, dw_wdt);
@@ -293,7 +656,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
* devicetree.
*/
if (dw_wdt_is_enabled(dw_wdt)) {
- wdd->timeout = dw_wdt_get_top(dw_wdt);
+ wdd->timeout = dw_wdt_get_timeout(dw_wdt);
set_bit(WDOG_HW_RUNNING, &wdd->status);
} else {
wdd->timeout = DW_WDT_DEFAULT_SECONDS;
@@ -306,10 +669,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
ret = watchdog_register_device(wdd);
if (ret)
- goto out_disable_clk;
+ goto out_disable_pclk;
+
+ dw_wdt_dbgfs_init(dw_wdt);
return 0;
+out_disable_pclk:
+ clk_disable_unprepare(dw_wdt->pclk);
+
out_disable_clk:
clk_disable_unprepare(dw_wdt->clk);
return ret;
@@ -319,8 +687,11 @@ static int dw_wdt_drv_remove(struct platform_device *pdev)
{
struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
+ dw_wdt_dbgfs_clear(dw_wdt);
+
watchdog_unregister_device(&dw_wdt->wdd);
reset_control_assert(dw_wdt->rst);
+ clk_disable_unprepare(dw_wdt->pclk);
clk_disable_unprepare(dw_wdt->clk);
return 0;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index f5ffa7be066e..2418ebb707bd 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -286,7 +286,7 @@ static long eurwdt_ioctl(struct file *file,
eurwdt_timeout = time;
eurwdt_set_timeout(time);
spin_unlock(&eurwdt_lock);
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(eurwdt_timeout, p);
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index a3c44d75d80e..f60beec1bbae 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -306,27 +306,6 @@ exit_unlock:
return err;
}
-static int f71862fg_pin_configure(unsigned short ioaddr)
-{
- /* When ioaddr is non-zero the calling function has to take care of
- mutex handling and superio preparation! */
-
- if (f71862fg_pin == 63) {
- if (ioaddr) {
- /* SPI must be disabled first to use this pin! */
- superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
- superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
- }
- } else if (f71862fg_pin == 56) {
- if (ioaddr)
- superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
- } else {
- pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin);
- return -EINVAL;
- }
- return 0;
-}
-
static int watchdog_start(void)
{
int err;
@@ -352,9 +331,13 @@ static int watchdog_start(void)
break;
case f71862fg:
- err = f71862fg_pin_configure(watchdog.sioaddr);
- if (err)
- goto exit_superio;
+ if (f71862fg_pin == 63) {
+ /* SPI must be disabled first to use this pin! */
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+ superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 4);
+ } else if (f71862fg_pin == 56) {
+ superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
+ }
break;
case f71868:
@@ -629,7 +612,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
if (new_options & WDIOS_ENABLECARD)
return watchdog_start();
- /* fall through */
+ fallthrough;
case WDIOC_KEEPALIVE:
watchdog_keepalive();
@@ -643,7 +626,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
watchdog_keepalive();
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(watchdog.timeout, uarg.i);
@@ -690,9 +673,9 @@ static int __init watchdog_init(int sioaddr)
* into the module have been registered yet.
*/
watchdog.sioaddr = sioaddr;
- watchdog.ident.options = WDIOC_SETTIMEOUT
- | WDIOF_MAGICCLOSE
- | WDIOF_KEEPALIVEPING;
+ watchdog.ident.options = WDIOF_MAGICCLOSE
+ | WDIOF_KEEPALIVEPING
+ | WDIOF_CARDRESET;
snprintf(watchdog.ident.identity,
sizeof(watchdog.ident.identity), "%s watchdog",
@@ -706,6 +689,13 @@ static int __init watchdog_init(int sioaddr)
wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
+ /*
+ * We don't want WDTMOUT_STS to stick around till regular reboot.
+ * Write 1 to the bit to clear it to zero.
+ */
+ superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
+ wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
+
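Since WDTMOUT_STS is write-one-to-clear, the cause-of-reboot flag has to be latched from the read value before the very same bit is written back as 1. A tiny user-space sketch of the idiom follows; the register image and bit position are hypothetical, not the chip's actual layout.

/* Illustrative user-space sketch of write-1-to-clear handling. */
#include <stdio.h>

#define WDTMOUT_STS (1u << 5)   /* hypothetical bit position */

int main(void)
{
    unsigned int wdt_conf = 0x24 | WDTMOUT_STS; /* hypothetical readback */

    /* Latch the flag first... */
    int caused_reboot = !!(wdt_conf & WDTMOUT_STS);

    /* ...then write the bit back as 1, which the hardware clears to 0. */
    unsigned int to_write = wdt_conf | WDTMOUT_STS;

    printf("caused_reboot=%d, value written back=0x%02x\n",
           caused_reboot, to_write);
    return 0;
}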
superio_exit(sioaddr);
err = watchdog_set_timeout(timeout);
@@ -803,7 +793,6 @@ static int __init f71808e_find(int sioaddr)
break;
case SIO_F71862_ID:
watchdog.type = f71862fg;
- err = f71862fg_pin_configure(0); /* validate module parameter */
break;
case SIO_F71868_ID:
watchdog.type = f71868;
@@ -852,6 +841,11 @@ static int __init f71808e_init(void)
int err = -ENODEV;
int i;
+ if (f71862fg_pin != 63 && f71862fg_pin != 56) {
+ pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin);
+ return -EINVAL;
+ }
+
for (i = 0; i < ARRAY_SIZE(addrs); i++) {
err = f71808e_find(addrs[i]);
if (err == 0)
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index f6541d1b65e3..df5406aa7d25 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -201,7 +201,7 @@ static long gef_wdt_ioctl(struct file *file, unsigned int cmd,
if (get_user(timeout, (int __user *)argp))
return -EFAULT;
gef_wdt_set_timeout(timeout);
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(gef_wdt_timeout, (int __user *)argp))
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 9914a4283cb2..83418924e30a 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -185,7 +185,7 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
if (geodewdt_set_heartbeat(interval))
return -EINVAL;
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 2b65ea9451d1..a0ddedc362fc 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -214,7 +214,7 @@ static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (ibwdt_set_heartbeat(new_margin))
return -EINVAL;
ibwdt_ping();
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 2fed40d14007..9b89d2f09568 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -303,7 +303,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
superio_exit();
it8712f_wdt_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, p))
return -EFAULT;
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 09886616fd21..aae29dcfaf11 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -136,7 +136,7 @@ static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd,
heartbeat = time;
wdt_enable();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(heartbeat, (int *)arg);
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 60ed6252e5f4..f388a769dbd3 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -155,7 +155,7 @@ static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
heartbeat = time;
wdt_enable();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(heartbeat, (int *)arg);
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 80ff94688487..743377c5b173 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -171,7 +171,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n)
switch (n) {
case WD1:
zf_writew(COUNTER_1, new);
- /* fall through */
+ fallthrough;
case WD2:
zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
default:
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
index 03b9ac4b99af..54193369e85c 100644
--- a/drivers/watchdog/mlx_wdt.c
+++ b/drivers/watchdog/mlx_wdt.c
@@ -21,6 +21,7 @@
#define MLXREG_WDT_CLOCK_SCALE 1000
#define MLXREG_WDT_MAX_TIMEOUT_TYPE1 32
#define MLXREG_WDT_MAX_TIMEOUT_TYPE2 255
+#define MLXREG_WDT_MAX_TIMEOUT_TYPE3 65535
#define MLXREG_WDT_MIN_TIMEOUT 1
#define MLXREG_WDT_OPTIONS_BASE (WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | \
WDIOF_SETTIMEOUT)
@@ -49,6 +50,7 @@ struct mlxreg_wdt {
int tleft_idx;
int ping_idx;
int reset_idx;
+ int regmap_val_sz;
enum mlxreg_wdt_type wdt_type;
};
@@ -111,7 +113,8 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
u32 regval, set_time, hw_timeout;
int rc;
- if (wdt->wdt_type == MLX_WDT_TYPE1) {
+ switch (wdt->wdt_type) {
+ case MLX_WDT_TYPE1:
rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
if (rc)
return rc;
@@ -120,14 +123,32 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
regval = (regval & reg_data->mask) | hw_timeout;
		/* Round down to the closest actual number of sec. */
set_time = BIT(hw_timeout) / MLXREG_WDT_CLOCK_SCALE;
- } else {
+ rc = regmap_write(wdt->regmap, reg_data->reg, regval);
+ break;
+ case MLX_WDT_TYPE2:
+ set_time = timeout;
+ rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
+ break;
+ case MLX_WDT_TYPE3:
+ /* WD_TYPE3 has 2B set time register */
set_time = timeout;
- regval = timeout;
+ if (wdt->regmap_val_sz == 1) {
+ regval = timeout & 0xff;
+ rc = regmap_write(wdt->regmap, reg_data->reg, regval);
+ if (!rc) {
+ regval = (timeout & 0xff00) >> 8;
+ rc = regmap_write(wdt->regmap,
+ reg_data->reg + 1, regval);
+ }
+ } else {
+ rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
+ }
+ break;
+ default:
+ return -EINVAL;
}
wdd->timeout = set_time;
- rc = regmap_write(wdt->regmap, reg_data->reg, regval);
-
if (!rc) {
/*
* Restart watchdog with new timeout period
@@ -147,10 +168,25 @@ static unsigned int mlxreg_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->tleft_idx];
- u32 regval;
+ u32 regval, msb, lsb;
int rc;
- rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ if (wdt->wdt_type == MLX_WDT_TYPE2) {
+ rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ } else {
+ /* WD_TYPE3 has 2 byte timeleft register */
+ if (wdt->regmap_val_sz == 1) {
+ rc = regmap_read(wdt->regmap, reg_data->reg, &lsb);
+ if (!rc) {
+ rc = regmap_read(wdt->regmap,
+ reg_data->reg + 1, &msb);
+ regval = (msb & 0xff) << 8 | (lsb & 0xff);
+ }
+ } else {
+ rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ }
+ }
+
	/* Return 0 timeleft in case of a failed register read. */
return rc == 0 ? regval : 0;
}
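With a 1-byte regmap, the TYPE3 16-bit timeout is written low byte first to reg and the high byte to reg + 1, and the time-left value is read back and recombined the same way. A user-space sketch of the split and recombination (the values are hypothetical, not part of the patch):

/* Illustrative user-space sketch, not part of the patch. */
#include <stdio.h>

int main(void)
{
    unsigned int timeout = 300;   /* seconds, fits in 16 bits */
    unsigned char reg[2];

    /* Set-timeout path: LSB to reg, MSB to reg + 1. */
    reg[0] = timeout & 0xff;
    reg[1] = (timeout & 0xff00) >> 8;

    /* Get-timeleft path: read both bytes and recombine. */
    unsigned int lsb = reg[0], msb = reg[1];
    unsigned int regval = (msb & 0xff) << 8 | (lsb & 0xff);

    printf("split: 0x%02x 0x%02x, recombined: %u\n", reg[0], reg[1], regval);
    return 0;
}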
@@ -212,13 +248,23 @@ static void mlxreg_wdt_config(struct mlxreg_wdt *wdt,
wdt->wdd.info = &mlxreg_wdt_aux_info;
wdt->wdt_type = pdata->version;
- if (wdt->wdt_type == MLX_WDT_TYPE2) {
- wdt->wdd.ops = &mlxreg_wdt_ops_type2;
- wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
- } else {
+ switch (wdt->wdt_type) {
+ case MLX_WDT_TYPE1:
wdt->wdd.ops = &mlxreg_wdt_ops_type1;
wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE1;
+ break;
+ case MLX_WDT_TYPE2:
+ wdt->wdd.ops = &mlxreg_wdt_ops_type2;
+ wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
+ break;
+ case MLX_WDT_TYPE3:
+ wdt->wdd.ops = &mlxreg_wdt_ops_type2;
+ wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE3;
+ break;
+ default:
+ break;
}
+
wdt->wdd.min_timeout = MLXREG_WDT_MIN_TIMEOUT;
}
@@ -249,6 +295,11 @@ static int mlxreg_wdt_probe(struct platform_device *pdev)
wdt->wdd.parent = dev;
wdt->regmap = pdata->regmap;
+ rc = regmap_get_val_bytes(wdt->regmap);
+ if (rc < 0)
+ return -EINVAL;
+
+ wdt->regmap_val_sz = rc;
mlxreg_wdt_config(wdt, pdata);
if ((pdata->features & MLXREG_CORE_WD_FEATURE_NOWAYOUT))
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index 0bc72dd69b70..894aa63488d3 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -222,7 +222,7 @@ static long mv64x60_wdt_ioctl(struct file *file,
if (get_user(timeout, (int __user *)argp))
return -EFAULT;
mv64x60_wdt_set_timeout(timeout);
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(mv64x60_wdt_timeout, (int __user *)argp))
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index d7a560e348d5..f6902a337422 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -7,7 +7,7 @@
* Based off i8xx_tco.c:
* (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
* Reserved.
- * http://www.kernelconcepts.de
+ * https://www.kernelconcepts.de
*
* TCO timer driver for NV chipsets
* based on softdog.c by Alan Cox <alan@redhat.com>
@@ -250,7 +250,7 @@ static long nv_tco_ioctl(struct file *file, unsigned int cmd,
if (tco_timer_set_heartbeat(new_heartbeat))
return -EINVAL;
tco_timer_keepalive();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h
index d325e528010f..c65f82588386 100644
--- a/drivers/watchdog/nv_tco.h
+++ b/drivers/watchdog/nv_tco.h
@@ -9,7 +9,7 @@
*
* (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
* Reserved.
- * http://www.kernelconcepts.de
+ * https://www.kernelconcepts.de
*
* Neither kernel concepts nor Nils Faerber admit liability nor provide
* warranty for any of this software. This material is provided
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 73fbfc99083b..2d4504302c9e 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -433,7 +433,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
timeout = new_timeout;
pc87413_refresh();
- /* fall through - and return the new timeout... */
+ fallthrough; /* and return the new timeout */
case WDIOC_GETTIMEOUT:
new_timeout = timeout * 60;
return put_user(new_timeout, uarg.i);
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 7a0587fdc52c..e86fa7f8351d 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
pcwd_keepalive();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, argp);
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 81508a42a90c..54d86fcb1837 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -542,7 +542,7 @@ static long pcipcwd_ioctl(struct file *file, unsigned int cmd,
pcipcwd_keepalive();
}
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 2f44af1831d0..41a928eb91ed 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -452,7 +452,7 @@ static long usb_pcwd_ioctl(struct file *file, unsigned int cmd,
usb_pcwd_keepalive(usb_pcwd_device);
}
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
@@ -585,9 +585,8 @@ static struct notifier_block usb_pcwd_notifier = {
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
{
usb_free_urb(usb_pcwd->intr_urb);
- if (usb_pcwd->intr_buffer != NULL)
- usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
- usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
+ usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
+ usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
kfree(usb_pcwd);
}
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index aee3c2efd565..e74802f3a32e 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -230,7 +230,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (rc32434_wdt_set(new_timeout))
return -EINVAL;
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 1b9a6dc8f982..7008596a575f 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
riowd_timeout = (new_margin + 59) / 60;
riowd_writereg(p, riowd_timeout, WDTO_INDEX);
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(riowd_timeout * 60, (int __user *)argp);
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index d456dd72d99a..705e8f7523e8 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -35,7 +35,11 @@
#define RTIWWDRX_NMI 0xa
-#define RTIWWDSIZE_50P 0x50
+#define RTIWWDSIZE_50P 0x50
+#define RTIWWDSIZE_25P 0x500
+#define RTIWWDSIZE_12P5 0x5000
+#define RTIWWDSIZE_6P25 0x50000
+#define RTIWWDSIZE_3P125 0x500000
#define WDENABLE_KEY 0xa98559da
@@ -48,7 +52,7 @@
#define DWDST BIT(1)
-static int heartbeat;
+static int heartbeat = DEFAULT_HEARTBEAT;
/*
* struct to hold data for each WDT device
@@ -79,11 +83,9 @@ static int rti_wdt_start(struct watchdog_device *wdd)
* be petted during the open window; not too early or not too late.
* The HW configuration options only allow for the open window size
 * to be 50% or less than that; we obviously want to configure the open
- * window as large as possible so we select the 50% option. To avoid
- * any glitches, we accommodate 5% safety margin also, so we setup
- * the min_hw_hearbeat at 55% of the timeout period.
+ * window as large as possible so we select the 50% option.
*/
- wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20;
+ wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
/* Generate NMI when wdt expires */
writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
@@ -110,7 +112,48 @@ static int rti_wdt_ping(struct watchdog_device *wdd)
return 0;
}
-static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
+{
+ /*
+ * RTI only supports a windowed mode, where the watchdog can only
+ * be petted during the open window; not too early or not too late.
+ * The HW configuration options only allow for the open window size
+ * to be 50% or less than that.
+ */
+ switch (wsize) {
+ case RTIWWDSIZE_50P:
+ /* 50% open window => 50% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 500 * heartbeat;
+ break;
+
+ case RTIWWDSIZE_25P:
+ /* 25% open window => 75% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 750 * heartbeat;
+ break;
+
+ case RTIWWDSIZE_12P5:
+ /* 12.5% open window => 87.5% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 875 * heartbeat;
+ break;
+
+ case RTIWWDSIZE_6P25:
+ /* 6.5% open window => 93.5% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 935 * heartbeat;
+ break;
+
+ case RTIWWDSIZE_3P125:
+ /* 3.125% open window => 96.9% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 969 * heartbeat;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
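Each open-window size translates into a minimum hardware heartbeat expressed in per-mille of the configured timeout (for a 25% window the first 75% of the period is closed, and so on; note the driver uses 935 and 969 rather than the exact 937.5 and 968.75). A user-space sketch of the same mapping with a hypothetical 60 s heartbeat:

/* Illustrative user-space sketch, not part of the patch. */
#include <stdio.h>

int main(void)
{
    unsigned int heartbeat = 60;  /* seconds, hypothetical */
    const struct { const char *window; unsigned int permille; } map[] = {
        { "50%",    500 },
        { "25%",    750 },
        { "12.5%",  875 },
        { "6.25%",  935 },
        { "3.125%", 969 },
    };

    for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        printf("open window %-6s -> min_hw_heartbeat_ms = %u\n",
               map[i].window, map[i].permille * heartbeat);
    return 0;
}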
+
+static unsigned int rti_wdt_get_timeleft_ms(struct watchdog_device *wdd)
{
u64 timer_counter;
u32 val;
@@ -123,11 +166,18 @@ static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR);
+ timer_counter *= 1000;
+
do_div(timer_counter, wdt->freq);
return timer_counter;
}
+static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ return rti_wdt_get_timeleft_ms(wdd) / 1000;
+}
+
static const struct watchdog_info rti_wdt_info = {
.options = WDIOF_KEEPALIVEPING,
.identity = "K3 RTI Watchdog",
@@ -148,6 +198,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
struct watchdog_device *wdd;
struct rti_wdt_device *wdt;
struct clk *clk;
+ u32 last_ping = 0;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -169,6 +220,14 @@ static int rti_wdt_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /*
+ * If watchdog is running at 32k clock, it is not accurate.
+ * Adjust frequency down in this case so that we don't pet
+ * the watchdog too often.
+ */
+ if (wdt->freq < 32768)
+ wdt->freq = wdt->freq * 9 / 10;
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret) {
@@ -185,11 +244,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
wdt->freq * 1000;
- wdd->timeout = DEFAULT_HEARTBEAT;
wdd->parent = dev;
- watchdog_init_timeout(wdd, heartbeat, dev);
-
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, 1);
watchdog_set_restart_priority(wdd, 128);
@@ -201,16 +257,53 @@ static int rti_wdt_probe(struct platform_device *pdev)
goto err_iomap;
}
+ if (readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) {
+ u32 time_left_ms;
+ u64 heartbeat_ms;
+ u32 wsize;
+
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ time_left_ms = rti_wdt_get_timeleft_ms(wdd);
+ heartbeat_ms = readl(wdt->base + RTIDWDPRLD);
+ heartbeat_ms <<= WDT_PRELOAD_SHIFT;
+ heartbeat_ms *= 1000;
+ do_div(heartbeat_ms, wdt->freq);
+ if (heartbeat_ms != heartbeat * 1000)
+ dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n");
+
+ heartbeat = heartbeat_ms;
+ heartbeat /= 1000;
+
+ wsize = readl(wdt->base + RTIWWDSIZECTRL);
+ ret = rti_wdt_setup_hw_hb(wdd, wsize);
+ if (ret) {
+ dev_err(dev, "bad window size.\n");
+ goto err_iomap;
+ }
+
+ last_ping = heartbeat_ms - time_left_ms;
+ if (time_left_ms > heartbeat_ms) {
+ dev_warn(dev, "time_left > heartbeat? Assuming last ping just before now.\n");
+ last_ping = 0;
+ }
+ }
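Recovering the effective heartbeat from an already-running watchdog is preload arithmetic: the preload register scaled by the preload shift gives the period in clock ticks, and the last ping happened however much of that period has already elapsed. A user-space sketch with hypothetical register values (the shift value 13 is an assumption here, not quoted from the patch):

/* Illustrative user-space sketch, not part of the patch. */
#include <stdio.h>

#define WDT_PRELOAD_SHIFT 13   /* assumed preload granularity */

int main(void)
{
    unsigned int freq = 32768;                /* hypothetical RTI clock, Hz */
    unsigned long long preload = 240;         /* hypothetical preload readback */
    unsigned long long time_left_ms = 30000;  /* hypothetical */

    unsigned long long heartbeat_ms = preload << WDT_PRELOAD_SHIFT;
    heartbeat_ms = heartbeat_ms * 1000 / freq;

    unsigned long long last_ping = 0;
    if (time_left_ms <= heartbeat_ms)
        last_ping = heartbeat_ms - time_left_ms;

    printf("heartbeat %llu s, last ping %llu ms ago\n",
           heartbeat_ms / 1000, last_ping);
    return 0;
}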
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
ret = watchdog_register_device(wdd);
if (ret) {
dev_err(dev, "cannot register watchdog device\n");
goto err_iomap;
}
+ if (last_ping)
+ watchdog_set_last_hw_keepalive(wdd, last_ping);
+
return 0;
err_iomap:
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -221,6 +314,7 @@ static int rti_wdt_remove(struct platform_device *pdev)
watchdog_unregister_device(&wdt->wdd);
pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 9b93be00109f..27846c6bdfb0 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -127,7 +127,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
pre_margin = oscr_freq * time;
writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
- /*fall through*/
+ fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(pre_margin / oscr_freq, p);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index da2dad00d473..504be461f992 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -202,7 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd,
timeout = time;
sbwdog_set(user_dog, timeout);
sbwdog_pet(user_dog);
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/*
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index f2cbe6d880a8..a947a63fb44a 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -265,7 +265,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
timeout = new_timeout;
wdt_keepalive();
}
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 520b8dd77ed4..d640b26e18a6 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -195,7 +195,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (wdt_set_timeout(new_timeout))
return -EINVAL;
}
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 1b20b33879c4..04483d6453d6 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -154,7 +154,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
margin = time;
wdt_enable();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(margin, (int *)arg);
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 9673eb12dacd..f22ebe89fe13 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -234,7 +234,7 @@ static long sc1200wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
timeout = new_timeout;
sc1200wdt_write_data(WDTO, timeout);
- /* fall through - and return the new timeout */
+ fallthrough; /* and return the new timeout */
case WDIOC_GETTIMEOUT:
return put_user(timeout * 60, p);
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index fbe79bcc9297..e66e6b905964 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -321,7 +321,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
wdt_keepalive();
}
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 83949a385f62..d8b77fe10eba 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -295,7 +295,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
if (sch311x_wdt_set_heartbeat(new_timeout))
return -EINVAL;
sch311x_wdt_keepalive();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index c94098acb78f..7b5e18323f3f 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -186,7 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
margin = new_margin;
scx200_wdt_update_margin();
scx200_wdt_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, p))
return -EFAULT;
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 43de56acd767..7463df479d11 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -474,7 +474,7 @@ static long wb_smsc_wdt_ioctl(struct file *file,
return -EINVAL;
timeout = new_timeout;
wb_smsc_wdt_set_timeout(timeout);
- /* fall through - and return the new timeout... */
+ fallthrough; /* and return the new timeout */
case WDIOC_GETTIMEOUT:
new_timeout = timeout;
if (unit == UNIT_MINUTE)
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 3e4885c1545e..7a1096265f18 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -20,11 +20,13 @@
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
+#include <linux/workqueue.h>
#define TIMER_MARGIN 60 /* Default is 60 seconds */
static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */
@@ -49,11 +51,34 @@ module_param(soft_panic, int, 0);
MODULE_PARM_DESC(soft_panic,
"Softdog action, set to 1 to panic, 0 to reboot (default=0)");
+static char *soft_reboot_cmd;
+module_param(soft_reboot_cmd, charp, 0000);
+MODULE_PARM_DESC(soft_reboot_cmd,
+ "Set reboot command. Emergency reboot takes place if unset");
+
+static bool soft_active_on_boot;
+module_param(soft_active_on_boot, bool, 0000);
+MODULE_PARM_DESC(soft_active_on_boot,
+	"Set to true to activate Softdog on boot (default=false)");
+
static struct hrtimer softdog_ticktock;
static struct hrtimer softdog_preticktock;
+static int reboot_kthread_fn(void *data)
+{
+ kernel_restart(soft_reboot_cmd);
+ return -EPERM; /* Should not reach here */
+}
+
+static void reboot_work_fn(struct work_struct *unused)
+{
+ kthread_run(reboot_kthread_fn, NULL, "softdog_reboot");
+}
+
static enum hrtimer_restart softdog_fire(struct hrtimer *timer)
{
+ static bool soft_reboot_fired;
+
module_put(THIS_MODULE);
if (soft_noboot) {
pr_crit("Triggered - Reboot ignored\n");
@@ -62,6 +87,33 @@ static enum hrtimer_restart softdog_fire(struct hrtimer *timer)
panic("Software Watchdog Timer expired");
} else {
pr_crit("Initiating system reboot\n");
+ if (!soft_reboot_fired && soft_reboot_cmd != NULL) {
+ static DECLARE_WORK(reboot_work, reboot_work_fn);
+ /*
+ * The 'kernel_restart' is a 'might-sleep' operation.
+ * Also, executing it in system-wide workqueues blocks
+ * any driver from using the same workqueue in its
+ * shutdown callback function. Thus, we should execute
+ * the 'kernel_restart' in a standalone kernel thread.
+			 * But since starting a kernel thread is also a
+			 * 'might-sleep' operation, the 'reboot_work' is
+			 * required as a launcher of the kernel thread.
+			 *
+			 * After requesting the reboot, restart the timer to
+			 * schedule an 'emergency_restart' reboot after
+			 * 'TIMER_MARGIN' seconds. If the softdog hangs, it
+			 * might be because of scheduling issues, and in that
+			 * case both 'schedule_work' and 'kernel_restart' may
+			 * malfunction at the same time.
+ */
+ soft_reboot_fired = true;
+ schedule_work(&reboot_work);
+ hrtimer_add_expires_ns(timer,
+ (u64)TIMER_MARGIN * NSEC_PER_SEC);
+
+ return HRTIMER_RESTART;
+ }
emergency_restart();
pr_crit("Reboot didn't ?????\n");
}
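The pattern described in the comment above is a two-stage hand-off: an hrtimer callback cannot sleep, so it only schedules a work item; the work item may sleep long enough to start a kernel thread, and only that thread calls the sleeping kernel_restart(). A minimal kernel-module-style sketch of the same hand-off follows; the trigger in example_init() is a stand-in for the hrtimer callback, the names are hypothetical, and loading such a module would immediately reboot the machine.

/* Minimal module-style sketch of the deferred-reboot hand-off. */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/workqueue.h>

static int reboot_thread_fn(void *data)
{
	kernel_restart(NULL);	/* may sleep, so it runs in its own kthread */
	return 0;
}

static void reboot_work_fn(struct work_struct *unused)
{
	/* kthread_run() may sleep too, hence the work item in between. */
	kthread_run(reboot_thread_fn, NULL, "example_reboot");
}

static DECLARE_WORK(reboot_work, reboot_work_fn);

static int __init example_init(void)
{
	/* In softdog this is done from the watchdog-expiry hrtimer callback. */
	schedule_work(&reboot_work);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");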
@@ -145,12 +197,17 @@ static int __init softdog_init(void)
softdog_preticktock.function = softdog_pretimeout;
}
+ if (soft_active_on_boot)
+ softdog_ping(&softdog_dev);
+
ret = watchdog_register_device(&softdog_dev);
if (ret)
return ret;
pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
soft_noboot, softdog_dev.timeout, soft_panic, nowayout);
+ pr_info(" soft_reboot_cmd=%s soft_active_on_boot=%d\n",
+ soft_reboot_cmd ?: "<not set>", soft_active_on_boot);
return 0;
}
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 93bd302ae7c5..85e9664318c9 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -7,7 +7,7 @@
* Based on i8xx_tco.c:
* (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
* Reserved.
- * http://www.kernelconcepts.de
+ * https://www.kernelconcepts.de
*
* See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide",
* AMD Publication 45482 "AMD SB800-Series Southbridges Register
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 5f05a45ac187..b50757882a98 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -235,7 +235,7 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
sunxi_wdt = devm_kzalloc(dev, sizeof(*sunxi_wdt), GFP_KERNEL);
if (!sunxi_wdt)
- return -EINVAL;
+ return -ENOMEM;
sunxi_wdt->wdt_regs = of_device_get_match_data(dev);
if (!sunxi_wdt->wdt_regs)
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 6b3b667e6f23..5772cc5d3780 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -289,7 +289,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
timeout = new_timeout;
wdt_keepalive();
}
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 5212e68c6b01..fd64ae77780a 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -422,7 +422,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
wdt_keepalive();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index a6925847f76f..a8a1ed215e1e 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -174,7 +174,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
timeout = new_timeout;
wafwdt_stop();
wafwdt_start();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index b9dc2c352151..6798addabd5a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -275,15 +275,18 @@ static int watchdog_start(struct watchdog_device *wdd)
set_bit(_WDOG_KEEPALIVE, &wd_data->status);
started_at = ktime_get();
- if (watchdog_hw_running(wdd) && wdd->ops->ping)
- err = wdd->ops->ping(wdd);
- else
+ if (watchdog_hw_running(wdd) && wdd->ops->ping) {
+ err = __watchdog_ping(wdd);
+ if (err == 0)
+ set_bit(WDOG_ACTIVE, &wdd->status);
+ } else {
err = wdd->ops->start(wdd);
- if (err == 0) {
- set_bit(WDOG_ACTIVE, &wdd->status);
- wd_data->last_keepalive = started_at;
- wd_data->last_hw_keepalive = started_at;
- watchdog_update_worker(wdd);
+ if (err == 0) {
+ set_bit(WDOG_ACTIVE, &wdd->status);
+ wd_data->last_keepalive = started_at;
+ wd_data->last_hw_keepalive = started_at;
+ watchdog_update_worker(wdd);
+ }
}
return err;
@@ -587,7 +590,7 @@ static DEVICE_ATTR_RW(pretimeout_governor);
static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct watchdog_device *wdd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
@@ -776,7 +779,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
err = watchdog_ping(wdd);
if (err < 0)
break;
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/* timeout == 0 means that we don't know the timeout */
if (wdd->timeout == 0) {
@@ -916,7 +919,7 @@ static int watchdog_release(struct inode *inode, struct file *file)
* or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
* watchdog_stop will fail.
*/
- if (!test_bit(WDOG_ACTIVE, &wdd->status))
+ if (!watchdog_active(wdd))
err = 0;
else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
!(wdd->info->options & WDIOF_MAGICCLOSE))
@@ -994,6 +997,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
if (IS_ERR_OR_NULL(watchdog_kworker))
return -ENODEV;
+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+ wd_data->dev.class = &watchdog_class;
+ wd_data->dev.parent = wdd->parent;
+ wd_data->dev.groups = wdd->groups;
+ wd_data->dev.release = watchdog_core_data_release;
+ dev_set_drvdata(&wd_data->dev, wdd);
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+
kthread_init_work(&wd_data->work, watchdog_ping_work);
hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
wd_data->timer.function = watchdog_timer_expired;
@@ -1014,15 +1026,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
}
}
- device_initialize(&wd_data->dev);
- wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
- wd_data->dev.class = &watchdog_class;
- wd_data->dev.parent = wdd->parent;
- wd_data->dev.groups = wdd->groups;
- wd_data->dev.release = watchdog_core_data_release;
- dev_set_drvdata(&wd_data->dev, wdd);
- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
-
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
@@ -1136,6 +1139,36 @@ void watchdog_dev_unregister(struct watchdog_device *wdd)
}
/*
+ * watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog
+ * @wdd: watchdog device
+ * @last_ping_ms: time since last HW heartbeat
+ *
+ * Adjusts the last known HW keepalive time for a watchdog timer.
+ * This is needed if the watchdog is already running when the probe
+ * function is called, and it can't be pinged immediately. This
+ * function must be called immediately after watchdog registration,
+ * and min_hw_heartbeat_ms must be set for this to be useful.
+ */
+int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
+ unsigned int last_ping_ms)
+{
+ struct watchdog_core_data *wd_data;
+ ktime_t now;
+
+ if (!wdd)
+ return -EINVAL;
+
+ wd_data = wdd->wd_data;
+
+ now = ktime_get();
+
+ wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
+
+ return __watchdog_ping(wdd);
+}
+EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
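A representative caller is a driver whose probe() finds the hardware already ticking, as in the rti_wdt change earlier in this series: register first, then adjust the last keepalive so the core does not ping outside the allowed window. A hedged probe-fragment sketch (the helper name and surrounding flow are illustrative, not a prescribed sequence beyond what the comment above states):

/* Sketch of the intended calling sequence from a driver probe(). */
#include <linux/bitops.h>
#include <linux/watchdog.h>

static int example_register_running_wdt(struct watchdog_device *wdd,
					unsigned int last_ping_ms)
{
	int ret;

	set_bit(WDOG_HW_RUNNING, &wdd->status);

	ret = watchdog_register_device(wdd);
	if (ret)
		return ret;

	/*
	 * Must follow registration immediately; only useful when
	 * min_hw_heartbeat_ms is set, since it re-pings the hardware
	 * relative to the adjusted last-keepalive time.
	 */
	return watchdog_set_last_hw_keepalive(wdd, last_ping_ms);
}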
+
+/*
* watchdog_dev_init: init dev part of watchdog core
*
* Allocate a range of chardev nodes to use for watchdog devices
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 184a06a74f83..c00627825de8 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -332,7 +332,7 @@ static long wdrtas_ioctl(struct file *file, unsigned int cmd,
wdrtas_interval = i;
else
wdrtas_interval = wdrtas_get_interval(i);
- /* fallthrough */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(wdrtas_interval, argp);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index f9054cb0f8e2..a9e40b5c633e 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (wdt_set_heartbeat(new_heartbeat))
return -EINVAL;
wdt_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index e60993d0767e..110249e5f642 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
soft_margin = new_margin;
reload = soft_margin * (mem_fclk_21285 / 256);
watchdog_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(soft_margin, int_arg);
break;
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 066a4fb4d75b..c9b8e863f70f 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
wdt977_keepalive();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index e528024faa41..c3254ba5ace6 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -426,7 +426,7 @@ static long wdtpci_ioctl(struct file *file, unsigned int cmd,
if (wdtpci_set_heartbeat(new_heartbeat))
return -EINVAL;
wdtpci_ping();
- /* fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 1d339ef92422..ea6c1e7e3e42 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -52,9 +52,7 @@ config XEN_BALLOON_MEMORY_HOTPLUG
config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
int "Hotplugged memory limit (in GiB) for a PV guest"
- default 512 if X86_64
- default 4 if X86_32
- range 0 64 if X86_32
+ default 512
depends on XEN_HAVE_PVMMU
depends on XEN_BALLOON_MEMORY_HOTPLUG
help
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 140c7bf33a98..90b8f56fbadb 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -156,7 +156,7 @@ int get_evtchn_to_irq(evtchn_port_t evtchn)
/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
- return irq_get_handler_data(irq);
+ return irq_get_chip_data(irq);
}
/* Constructors for packed IRQ information. */
@@ -377,7 +377,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
- irq_set_handler_data(irq, info);
+ irq_set_chip_data(irq, info);
list_add_tail(&info->list, &xen_irq_list_head);
}
@@ -426,14 +426,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (WARN_ON(!info))
return;
list_del(&info->list);
- irq_set_handler_data(irq, NULL);
+ irq_set_chip_data(irq, NULL);
WARN_ON(info->refcnt > 0);
@@ -603,7 +603,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(unsigned int irq)
{
evtchn_port_t evtchn = evtchn_from_irq(irq);
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (info->refcnt > 0) {
info->refcnt--;
@@ -1108,7 +1108,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (WARN_ON(!info))
return;
@@ -1142,7 +1142,7 @@ int evtchn_make_refcounted(evtchn_port_t evtchn)
if (irq == -1)
return -ENOENT;
- info = irq_get_handler_data(irq);
+ info = irq_get_chip_data(irq);
if (!info)
return -ENOENT;
@@ -1170,7 +1170,7 @@ int evtchn_get(evtchn_port_t evtchn)
if (irq == -1)
goto done;
- info = irq_get_handler_data(irq);
+ info = irq_get_chip_data(irq);
if (!info)
goto done;
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 75d3bb948bf3..b1b6eebafd5d 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -613,6 +613,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
goto fail_detach;
}
+ /* Check that we have zero offset. */
+ if (sgt->sgl->offset) {
+ ret = ERR_PTR(-EINVAL);
+ pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
+ sgt->sgl->offset);
+ goto fail_unmap;
+ }
+
/* Check number of pages that imported buffer has. */
if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
ret = ERR_PTR(-EINVAL);
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index b43b5595e988..72d725a0ab5c 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -1263,7 +1263,7 @@ static void pvcalls_front_changed(struct xenbus_device *dev,
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state */
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 7457213b8915..f914b72557ef 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -229,7 +229,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
case ACPI_NOTIFY_BUS_CHECK:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived BUS CHECK notification for device\n"));
- /* Fall Through */
+ fallthrough;
case ACPI_NOTIFY_DEVICE_CHECK:
if (event == ACPI_NOTIFY_DEVICE_CHECK)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index f2115587855f..b500466a6c37 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -545,7 +545,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
xenbus_switch_state(xdev, XenbusStateClosed);
if (xenbus_dev_is_online(xdev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
device_unregister(&xdev->dev);
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 75c0a2e9a6db..1e8cfd80a4e6 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1185,7 +1185,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
break;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 786fbb7d8be0..907bcbb93afb 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -379,8 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
int i, j;
for (i = 0; i < nr_pages; i++) {
- err = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_gfn(vaddr), 0);
+ unsigned long gfn;
+
+ if (is_vmalloc_addr(vaddr))
+ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+ else
+ gfn = virt_to_gfn(vaddr);
+
+ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 15379089853b..480944606a3c 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -401,12 +401,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state)
case XenbusStateConnected:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
xenbus_reset_wait_for_backend(be, XenbusStateClosing);
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
xenbus_reset_wait_for_backend(be, XenbusStateClosed);
- /* fall through */
+ fallthrough;
case XenbusStateClosed:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);