Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/accessibility/speakup/fakekey.c4
-rw-r--r--drivers/accessibility/speakup/serialio.c2
-rw-r--r--drivers/accessibility/speakup/speakup_acntpc.c2
-rw-r--r--drivers/accessibility/speakup/speakup_acntsa.c2
-rw-r--r--drivers/accessibility/speakup/speakup_apollo.c2
-rw-r--r--drivers/accessibility/speakup/speakup_audptr.c2
-rw-r--r--drivers/accessibility/speakup/speakup_bns.c2
-rw-r--r--drivers/accessibility/speakup/speakup_decext.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dectlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dtlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dummy.c2
-rw-r--r--drivers/accessibility/speakup/speakup_keypc.c2
-rw-r--r--drivers/accessibility/speakup/speakup_ltlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_soft.c3
-rw-r--r--drivers/accessibility/speakup/speakup_spkout.c2
-rw-r--r--drivers/accessibility/speakup/speakup_txprt.c2
-rw-r--r--drivers/amba/bus.c28
-rw-r--r--drivers/android/binder.c201
-rw-r--r--drivers/android/binder_alloc.c22
-rw-r--r--drivers/android/binder_internal.h5
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/pata_palmld.c3
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/arch_topology.c5
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c4
-rw-r--r--drivers/base/core.c15
-rw-r--r--drivers/base/dd.c44
-rw-r--r--drivers/base/driver.c70
-rw-r--r--drivers/base/firmware_loader/Kconfig43
-rw-r--r--drivers/base/firmware_loader/Makefile2
-rw-r--r--drivers/base/firmware_loader/fallback.c430
-rw-r--r--drivers/base/firmware_loader/fallback.h46
-rw-r--r--drivers/base/firmware_loader/firmware.h16
-rw-r--r--drivers/base/firmware_loader/main.c94
-rw-r--r--drivers/base/firmware_loader/sysfs.c422
-rw-r--r--drivers/base/firmware_loader/sysfs.h117
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c397
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.h41
-rw-r--r--drivers/base/physical_location.c143
-rw-r--r--drivers/base/physical_location.h16
-rw-r--r--drivers/base/platform.c34
-rw-r--r--drivers/base/property.c96
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/nbd.c114
-rw-r--r--drivers/block/null_blk/main.c6
-rw-r--r--drivers/block/null_blk/null_blk.h7
-rw-r--r--drivers/block/null_blk/zoned.c6
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/virtio_blk.c220
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c25
-rw-r--r--drivers/bus/mhi/Kconfig1
-rw-r--r--drivers/bus/mhi/Makefile3
-rw-r--r--drivers/bus/mhi/common.h22
-rw-r--r--drivers/bus/mhi/ep/Kconfig10
-rw-r--r--drivers/bus/mhi/ep/Makefile2
-rw-r--r--drivers/bus/mhi/ep/internal.h218
-rw-r--r--drivers/bus/mhi/ep/main.c1591
-rw-r--r--drivers/bus/mhi/ep/mmio.c273
-rw-r--r--drivers/bus/mhi/ep/ring.c207
-rw-r--r--drivers/bus/mhi/ep/sm.c148
-rw-r--r--drivers/bus/mhi/host/boot.c22
-rw-r--r--drivers/bus/mhi/host/init.c89
-rw-r--r--drivers/bus/mhi/host/internal.h7
-rw-r--r--drivers/bus/mhi/host/main.c18
-rw-r--r--drivers/bus/mhi/host/pci_generic.c133
-rw-r--r--drivers/bus/mhi/host/pm.c24
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/misc.c24
-rw-r--r--drivers/char/pcmcia/synclink_cs.c10
-rw-r--r--drivers/char/ttyprintk.c16
-rw-r--r--drivers/char/xillybus/xillybus_class.c26
-rw-r--r--drivers/char/xillybus/xillyusb.c1
-rw-r--r--drivers/clk/imx/clk-scu.c7
-rw-r--r--drivers/clk/pxa/clk-pxa.c8
-rw-r--r--drivers/clk/pxa/clk-pxa.h9
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c46
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c68
-rw-r--r--drivers/clk/pxa/clk-pxa2xx.h58
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c139
-rw-r--r--drivers/clocksource/Kconfig10
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/bcm_kona_timer.c14
-rw-r--r--drivers/clocksource/jcore-pit.c5
-rw-r--r--drivers/clocksource/mips-gic-timer.c9
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c5
-rw-r--r--drivers/clocksource/timer-digicolor.c5
-rw-r--r--drivers/clocksource/timer-gxp.c209
-rw-r--r--drivers/clocksource/timer-ixp4xx.c25
-rw-r--r--drivers/clocksource/timer-lpc32xx.c6
-rw-r--r--drivers/clocksource/timer-orion.c5
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c2
-rw-r--r--drivers/clocksource/timer-pistachio.c5
-rw-r--r--drivers/clocksource/timer-riscv.c2
-rw-r--r--drivers/clocksource/timer-sp804.c10
-rw-r--r--drivers/clocksource/timer-sun4i.c5
-rw-r--r--drivers/clocksource/timer-sun5i.c5
-rw-r--r--drivers/clocksource/timer-ti-dm.c3
-rw-r--r--drivers/comedi/drivers.c2
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c65
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c95
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h21
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c55
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c140
-rw-r--r--drivers/dio/dio.c5
-rw-r--r--drivers/extcon/Kconfig3
-rw-r--r--drivers/extcon/extcon-axp288.c4
-rw-r--r--drivers/extcon/extcon-intel-int3496.c54
-rw-r--r--drivers/extcon/extcon-ptn5150.c36
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon-usb-gpio.c15
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c2
-rw-r--r--drivers/extcon/extcon.c37
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/dmi-sysfs.c2
-rw-r--r--drivers/firmware/edd.c3
-rw-r--r--drivers/firmware/efi/Kconfig52
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c4
-rw-r--r--drivers/firmware/stratix10-svc.c12
-rw-r--r--drivers/firmware/xilinx/zynqmp.c131
-rw-r--r--drivers/fpga/Makefile6
-rw-r--r--drivers/fpga/dfl-pci.c9
-rw-r--r--drivers/fpga/dfl.c38
-rw-r--r--drivers/fpga/dfl.h1
-rw-r--r--drivers/fpga/fpga-mgr.c13
-rw-r--r--drivers/fpga/fpga-region.c6
-rw-r--r--drivers/fpga/of-fpga-region.c22
-rw-r--r--drivers/gpio/gpio-adp5588.c19
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-tegra186.c81
-rw-r--r--drivers/gpio/gpiolib-cdev.c252
-rw-r--r--drivers/gpio/gpiolib.c58
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h2964
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm394
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm244
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c248
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c617
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h2
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c57
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c49
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c62
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c9
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c2
-rw-r--r--drivers/gpu/host1x/Kconfig5
-rw-r--r--drivers/gpu/host1x/Makefile1
-rw-r--r--drivers/gpu/host1x/context_bus.c31
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hte/Kconfig33
-rw-r--r--drivers/hte/Makefile3
-rw-r--r--drivers/hte/hte-tegra194-test.c238
-rw-r--r--drivers/hte/hte-tegra194.c730
-rw-r--r--drivers/hte/hte.c947
-rw-r--r--drivers/hv/vmbus_drv.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c136
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c180
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h120
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/accel/adxl355_core.c7
-rw-r--r--drivers/iio/accel/adxl367.c1
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c4
-rw-r--r--drivers/iio/accel/dmard09.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c1
-rw-r--r--drivers/iio/accel/kxsd9-spi.c4
-rw-r--r--drivers/iio/accel/mma8452.c1
-rw-r--r--drivers/iio/accel/sca3000.c1
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c1
-rw-r--r--drivers/iio/accel/st_accel.h28
-rw-r--r--drivers/iio/accel/st_accel_core.c14
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c5
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7124.c86
-rw-r--r--drivers/iio/adc/ad7192.c68
-rw-r--r--drivers/iio/adc/ad7266.c44
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c143
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/ina2xx-adc.c3
-rw-r--r--drivers/iio/adc/palmas_gpadc.c3
-rw-r--r--drivers/iio/adc/sc27xx_adc.c470
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c5
-rw-r--r--drivers/iio/adc/stmpe-adc.c21
-rw-r--r--drivers/iio/adc/ti-ads1015.c398
-rw-r--r--drivers/iio/adc/ti-ads8688.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c4
-rw-r--r--drivers/iio/afe/Kconfig1
-rw-r--r--drivers/iio/afe/iio-rescale.c5
-rw-r--r--drivers/iio/buffer/kfifo_buf.c10
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c5
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c1
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c50
-rw-r--r--drivers/iio/dac/Kconfig4
-rw-r--r--drivers/iio/dac/ad5064.c2
-rw-r--r--drivers/iio/dac/ad5360.c2
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5504.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/dac/ad5755.c2
-rw-r--r--drivers/iio/dac/ad5791.c2
-rw-r--r--drivers/iio/dac/ad7303.c2
-rw-r--r--drivers/iio/dac/ltc2632.c8
-rw-r--r--drivers/iio/dac/ltc2688.c19
-rw-r--r--drivers/iio/dac/max5821.c2
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/dac/ti-dac7311.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c20
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c48
-rw-r--r--drivers/iio/frequency/ad9523.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c8
-rw-r--r--drivers/iio/gyro/mpu3050-core.c14
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/mpu3050.h2
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c15
-rw-r--r--drivers/iio/health/max30100.c1
-rw-r--r--drivers/iio/health/max30102.c1
-rw-r--r--drivers/iio/imu/adis16480.c91
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c27
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c13
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c18
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c42
-rw-r--r--drivers/iio/industrialio-core.c46
-rw-r--r--drivers/iio/industrialio-event.c2
-rw-r--r--drivers/iio/industrialio-trigger.c2
-rw-r--r--drivers/iio/light/Kconfig1
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/light/stk3310.c25
-rw-r--r--drivers/iio/light/tsl2772.c25
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c15
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c15
-rw-r--r--drivers/iio/multiplexer/Kconfig1
-rw-r--r--drivers/iio/multiplexer/iio-mux.c49
-rw-r--r--drivers/iio/pressure/st_pressure_core.c8
-rw-r--r--drivers/iio/proximity/mb1232.c8
-rw-r--r--drivers/iio/proximity/ping.c5
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c7
-rw-r--r--drivers/iio/temperature/ltc2983.c236
-rw-r--r--drivers/iio/temperature/max31856.c6
-rw-r--r--drivers/iio/temperature/max31865.c4
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c11
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/misc/ati_remote2.c2
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/soc_button_array.c4
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/misc/yealink.c2
-rw-r--r--drivers/input/mouse/bcm5974.c7
-rw-r--r--drivers/input/mouse/pxa930_trkball.c1
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c130
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c42
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c43
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c16
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h6
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h6
-rw-r--r--drivers/interconnect/qcom/msm8916.c12
-rw-r--r--drivers/interconnect/qcom/msm8939.c16
-rw-r--r--drivers/interconnect/qcom/msm8974.c28
-rw-r--r--drivers/interconnect/qcom/msm8996.c16
-rw-r--r--drivers/interconnect/qcom/osm-l3.c16
-rw-r--r--drivers/interconnect/qcom/qcm2290.c24
-rw-r--r--drivers/interconnect/qcom/qcs404.c12
-rw-r--r--drivers/interconnect/qcom/sc7180.c66
-rw-r--r--drivers/interconnect/qcom/sc7280.c72
-rw-r--r--drivers/interconnect/qcom/sc8180x.c1895
-rw-r--r--drivers/interconnect/qcom/sc8180x.h7
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c2438
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h209
-rw-r--r--drivers/interconnect/qcom/sdm660.c24
-rw-r--r--drivers/interconnect/qcom/sdm845.c32
-rw-r--r--drivers/interconnect/qcom/sdx55.c12
-rw-r--r--drivers/interconnect/qcom/sdx65.c231
-rw-r--r--drivers/interconnect/qcom/sdx65.h65
-rw-r--r--drivers/interconnect/qcom/sm8150.c66
-rw-r--r--drivers/interconnect/qcom/sm8250.c66
-rw-r--r--drivers/interconnect/qcom/sm8350.c60
-rw-r--r--drivers/interconnect/qcom/sm8450.c68
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c6
-rw-r--r--drivers/leds/leds-locomo.c1
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/btree.c59
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/journal.c31
-rw-r--r--drivers/md/bcache/journal.h2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/super.c1
-rw-r--r--drivers/md/bcache/writeback.c133
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/md-linear.c5
-rw-r--r--drivers/md/md-multipath.c15
-rw-r--r--drivers/md/md.c185
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c29
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid10.c54
-rw-r--r--drivers/md/raid5-cache.c5
-rw-r--r--drivers/md/raid5-ppl.c27
-rw-r--r--drivers/md/raid5.c37
-rw-r--r--drivers/media/rc/ati_remote.c4
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/xbox_remote.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c2
-rw-r--r--drivers/mfd/tc6393xb.c130
-rw-r--r--drivers/misc/altera-stapl/altera.c56
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c29
-rw-r--r--drivers/misc/cardreader/alcor_pci.c6
-rw-r--r--drivers/misc/cardreader/rts5261.c115
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c1
-rw-r--r--drivers/misc/fastrpc.c18
-rw-r--r--drivers/misc/habanalabs/common/Makefile2
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c413
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c89
-rw-r--r--drivers/misc/habanalabs/common/context.c4
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c304
-rw-r--r--drivers/misc/habanalabs/common/device.c280
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c86
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h415
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c44
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c108
-rw-r--r--drivers/misc/habanalabs/common/irq.c14
-rw-r--r--drivers/misc/habanalabs/common/memory.c289
-rw-r--r--drivers/misc/habanalabs/common/memory_mgr.c349
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c296
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c297
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c412
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h4
-rw-r--r--drivers/misc/habanalabs/goya/goya.c363
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h70
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h10
-rw-r--r--drivers/misc/lkdtm/bugs.c96
-rw-r--r--drivers/misc/lkdtm/cfi.c145
-rw-r--r--drivers/misc/lkdtm/core.c138
-rw-r--r--drivers/misc/lkdtm/fortify.c17
-rw-r--r--drivers/misc/lkdtm/heap.c48
-rw-r--r--drivers/misc/lkdtm/lkdtm.h142
-rw-r--r--drivers/misc/lkdtm/perms.c47
-rw-r--r--drivers/misc/lkdtm/powerpc.c11
-rw-r--r--drivers/misc/lkdtm/refcount.c65
-rw-r--r--drivers/misc/lkdtm/stackleak.c13
-rw-r--r--drivers/misc/lkdtm/usercopy.c146
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c2
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c2
-rw-r--r--drivers/misc/pvpanic/pvpanic.c10
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/misc/vmw_vmci/Kconfig2
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c15
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c12
-rw-r--r--drivers/mmc/core/block.c3
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c3
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c121
-rw-r--r--drivers/mtd/ubi/fastmap.c11
-rw-r--r--drivers/mtd/ubi/ubi.h4
-rw-r--r--drivers/mtd/ubi/vmt.c1
-rw-r--r--drivers/mtd/ubi/wl.c33
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/net/dsa/b53/b53_common.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c35
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c38
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c32
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c10
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c20
-rw-r--r--drivers/net/xen-netfront.c7
-rw-r--r--drivers/nvme/host/core.c13
-rw-r--r--drivers/nvme/host/fc.c18
-rw-r--r--drivers/nvme/host/ioctl.c3
-rw-r--r--drivers/nvme/host/pci.c12
-rw-r--r--drivers/nvme/target/passthru.c5
-rw-r--r--drivers/nvmem/Kconfig13
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/apple-efuses.c80
-rw-r--r--drivers/nvmem/bcm-ocotp.c2
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/core.c1
-rw-r--r--drivers/nvmem/layerscape-sfp.c36
-rw-r--r--drivers/nvmem/qfprom.c3
-rw-r--r--drivers/nvmem/sunplus-ocotp.c4
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pcmcia/Makefile13
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c137
-rw-r--r--drivers/pcmcia/pxa2xx_base.c48
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c165
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c127
-rw-r--r--drivers/pcmcia/pxa2xx_hx4700.c118
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c110
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c162
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c111
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c3
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c200
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c182
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c137
-rw-r--r--drivers/pcmcia/sa1111_generic.c1
-rw-r--r--drivers/pcmcia/sa1111_lubbock.c1
-rw-r--r--drivers/pcmcia/soc_common.c19
-rw-r--r--drivers/pcmcia/soc_common.h120
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/allwinner/phy-sun6i-mipi-dphy.c166
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c193
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c276
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c10
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c50
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c29
-rw-r--r--drivers/phy/phy-can-transceiver.c24
-rw-r--r--drivers/phy/phy-core.c44
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c124
-rw-r--r--drivers/phy/rockchip/phy-rockchip-dphy-rx0.c7
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c129
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c6
-rw-r--r--drivers/power/supply/axp288_charger.c17
-rw-r--r--drivers/power/supply/charger-manager.c7
-rw-r--r--drivers/power/supply/max8997_charger.c8
-rw-r--r--drivers/power/supply/tosa_battery.c172
-rw-r--r--drivers/rpmsg/rpmsg_core.c42
-rw-r--r--drivers/rpmsg/rpmsg_internal.h5
-rw-r--r--drivers/rpmsg/rpmsg_ns.c4
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/s390/cio/cio.h6
-rw-r--r--drivers/s390/cio/css.c28
-rw-r--r--drivers/s390/virtio/virtio_ccw.c34
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/lpfc/Makefile2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c263
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c288
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c50
-rw-r--r--drivers/scsi/myrb.c11
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c2
-rw-r--r--drivers/scsi/qla1280.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c7
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sd.h4
-rw-r--r--drivers/scsi/sd_zbc.c26
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/ufs/ufs.h623
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h116
-rw-r--r--drivers/scsi/ufs/ufshcd.h1230
-rw-r--r--drivers/scsi/ufs/ufshci.h510
-rw-r--r--drivers/scsi/ufs/unipro.h316
-rw-r--r--drivers/slimbus/qcom-ctrl.c4
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c23
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c2
-rw-r--r--drivers/soc/pxa/Kconfig8
-rw-r--r--drivers/soc/pxa/Makefile6
-rw-r--r--drivers/soc/pxa/mfp.c282
-rw-r--r--drivers/soc/pxa/ssp.c231
-rw-r--r--drivers/soc/rockchip/grf.c2
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c203
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c7
-rw-r--r--drivers/soundwire/bus.c27
-rw-r--r--drivers/soundwire/cadence_master.c42
-rw-r--r--drivers/soundwire/intel.c11
-rw-r--r--drivers/soundwire/qcom.c22
-rw-r--r--drivers/soundwire/stream.c1
-rw-r--r--drivers/spi/spi.c26
-rw-r--r--drivers/staging/Kconfig3
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c2
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/audio_codec.c32
-rw-r--r--drivers/staging/greybus/pwm.c1
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c2
-rw-r--r--drivers/staging/iio/cdc/ad7746.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/ks7010/ks_hostif.c19
-rw-r--r--drivers/staging/ks7010/ks_wlan.h2
-rw-r--r--drivers/staging/most/dim2/dim2.c29
-rw-r--r--drivers/staging/qlge/qlge.h1
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c3
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c76
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c337
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c163
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c45
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c10
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c321
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c755
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c70
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c109
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c319
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c135
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c104
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c6
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c2
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c2
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c22
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c4
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c8
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c49
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c15
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c8
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c7
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c8
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c355
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c10
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h5
-rw-r--r--drivers/staging/r8188eu/include/basic_types.h73
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h7
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h34
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h63
-rw-r--r--drivers/staging/r8188eu/include/odm.h29
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h47
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h3
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h10
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h55
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_fw.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl.h79
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h62
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h23
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h4
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h22
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h4
-rw-r--r--drivers/staging/r8188eu/include/usb_vendor_req.h35
-rw-r--r--drivers/staging/r8188eu/include/wifi.h60
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c295
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c1
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c45
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c5
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c19
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c20
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c10
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c63
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c30
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/drv_types.h3
-rw-r--r--drivers/staging/rtl8712/ieee80211.c4
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h4
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c16
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h4
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c102
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1
-rw-r--r--drivers/staging/rtl8712/sta_info.h1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c14
-rw-r--r--drivers/staging/rtl8712/usb_ops.c27
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c21
-rw-r--r--drivers/staging/rtl8712/wifi.h1
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c24
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c54
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c44
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c66
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c194
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c56
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c15
-rw-r--r--drivers/staging/rtl8723bs/include/HalVerDef.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h295
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl.h72
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c12
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c1
-rw-r--r--drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset89
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt337
-rw-r--r--drivers/staging/unisys/Kconfig16
-rw-r--r--drivers/staging/unisys/MAINTAINERS5
-rw-r--r--drivers/staging/unisys/Makefile7
-rw-r--r--drivers/staging/unisys/TODO16
-rw-r--r--drivers/staging/unisys/include/iochannel.h571
-rw-r--r--drivers/staging/unisys/visorhba/Kconfig15
-rw-r--r--drivers/staging/unisys/visorhba/Makefile10
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c1142
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig16
-rw-r--r--drivers/staging/unisys/visorinput/Makefile7
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c788
-rw-r--r--drivers/staging/unisys/visornic/Kconfig16
-rw-r--r--drivers/staging/unisys/visornic/Makefile10
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2148
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig8
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/TODO10
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c86
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c33
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c24
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c33
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c26
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c13
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h7
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h6
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h15
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c11
-rw-r--r--drivers/staging/vme/Makefile2
-rw-r--r--drivers/staging/vme_user/Kconfig (renamed from drivers/staging/vme/devices/Kconfig)2
-rw-r--r--drivers/staging/vme_user/Makefile (renamed from drivers/staging/vme/devices/Makefile)0
-rw-r--r--drivers/staging/vme_user/vme_user.c (renamed from drivers/staging/vme/devices/vme_user.c)2
-rw-r--r--drivers/staging/vme_user/vme_user.h (renamed from drivers/staging/vme/devices/vme_user.h)0
-rw-r--r--drivers/staging/vt6655/baseband.c15
-rw-r--r--drivers/staging/vt6655/card.c38
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/channel.c6
-rw-r--r--drivers/staging/vt6655/device_main.c37
-rw-r--r--drivers/staging/vt6655/key.c1
-rw-r--r--drivers/staging/vt6655/mac.c1
-rw-r--r--drivers/staging/vt6655/mac.h271
-rw-r--r--drivers/staging/vt6655/rf.c10
-rw-r--r--drivers/staging/vt6655/rxtx.c6
-rw-r--r--drivers/staging/vt6655/srom.c19
-rw-r--r--drivers/staging/vt6655/tmacro.h43
-rw-r--r--drivers/staging/vt6655/upc.h25
-rw-r--r--drivers/staging/vt6656/channel.c1
-rw-r--r--drivers/staging/vt6656/rf.c1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c10
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c34
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c8
-rw-r--r--drivers/target/target_core_pscsi.c3
-rw-r--r--drivers/tee/optee/call.c2
-rw-r--r--drivers/thunderbolt/ctl.c15
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/thunderbolt/path.c6
-rw-r--r--drivers/thunderbolt/switch.c109
-rw-r--r--drivers/thunderbolt/tb.c25
-rw-r--r--drivers/thunderbolt/tb.h6
-rw-r--r--drivers/thunderbolt/tb_msgs.h39
-rw-r--r--drivers/thunderbolt/tb_regs.h5
-rw-r--r--drivers/thunderbolt/test.c108
-rw-r--r--drivers/thunderbolt/tunnel.c18
-rw-r--r--drivers/thunderbolt/tunnel.h4
-rw-r--r--drivers/thunderbolt/usb4_port.c38
-rw-r--r--drivers/thunderbolt/xdomain.c609
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/hvc/Kconfig19
-rw-r--r--drivers/tty/hvc/hvc_dcc.c194
-rw-r--r--drivers/tty/hvc/hvc_opal.c6
-rw-r--r--drivers/tty/hvc/hvc_vio.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c5
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/mxser.c5
-rw-r--r--drivers/tty/n_gsm.c37
-rw-r--r--drivers/tty/n_tty.c73
-rw-r--r--drivers/tty/serial/8250/8250.h41
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_dma.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c229
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c116
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h51
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c8
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c7
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c480
-rw-r--r--drivers/tty/serial/8250/8250_port.c140
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig9
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c48
-rw-r--r--drivers/tty/serial/amba-pl011.h35
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c1
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c66
-rw-r--r--drivers/tty/serial/icom.c538
-rw-r--r--drivers/tty/serial/icom.h274
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c8
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c8
-rw-r--r--drivers/tty/serial/max310x.c1
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/meson_uart.c40
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/msm_serial.c5
-rw-r--r--drivers/tty/serial/omap-serial.c13
-rw-r--r--drivers/tty/serial/owl-uart.c7
-rw-r--r--drivers/tty/serial/pch_uart.c77
-rw-r--r--drivers/tty/serial/pic32_uart.c159
-rw-r--r--drivers/tty/serial/pic32_uart.h125
-rw-r--r--drivers/tty/serial/pmac_zilog.c69
-rw-r--r--drivers/tty/serial/pmac_zilog.h11
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c58
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sa1100.c4
-rw-r--r--drivers/tty/serial/samsung_tty.c13
-rw-r--r--drivers/tty/serial/sc16is7xx.c10
-rw-r--r--drivers/tty/serial/serial_core.c89
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c6
-rw-r--r--drivers/tty/serial/sifive.c20
-rw-r--r--drivers/tty/serial/st-asc.c4
-rw-r--r--drivers/tty/serial/stm32-usart.c225
-rw-r--r--drivers/tty/serial/stm32-usart.h3
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/uartlite.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c46
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/sysrq.c27
-rw-r--r--drivers/tty/tty_baudrate.c35
-rw-r--r--drivers/tty/tty_ioctl.c2
-rw-r--r--drivers/tty/tty_jobctrl.c4
-rw-r--r--drivers/ufs/Kconfig30
-rw-r--r--drivers/ufs/Makefile5
-rw-r--r--drivers/ufs/core/Kconfig60
-rw-r--r--drivers/ufs/core/Makefile10
-rw-r--r--drivers/ufs/core/ufs-debugfs.c (renamed from drivers/scsi/ufs/ufs-debugfs.c)2
-rw-r--r--drivers/ufs/core/ufs-debugfs.h (renamed from drivers/scsi/ufs/ufs-debugfs.h)0
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c (renamed from drivers/scsi/ufs/ufs-fault-injection.c)0
-rw-r--r--drivers/ufs/core/ufs-fault-injection.h (renamed from drivers/scsi/ufs/ufs-fault-injection.h)0
-rw-r--r--drivers/ufs/core/ufs-hwmon.c (renamed from drivers/scsi/ufs/ufs-hwmon.c)2
-rw-r--r--drivers/ufs/core/ufs-sysfs.c (renamed from drivers/scsi/ufs/ufs-sysfs.c)2
-rw-r--r--drivers/ufs/core/ufs-sysfs.h (renamed from drivers/scsi/ufs/ufs-sysfs.h)0
-rw-r--r--drivers/ufs/core/ufs_bsg.c (renamed from drivers/scsi/ufs/ufs_bsg.c)2
-rw-r--r--drivers/ufs/core/ufs_bsg.h (renamed from drivers/scsi/ufs/ufs_bsg.h)0
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c (renamed from drivers/scsi/ufs/ufshcd-crypto.c)2
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h (renamed from drivers/scsi/ufs/ufshcd-crypto.h)4
-rw-r--r--drivers/ufs/core/ufshcd-priv.h (renamed from drivers/scsi/ufs/ufshcd-priv.h)2
-rw-r--r--drivers/ufs/core/ufshcd.c (renamed from drivers/scsi/ufs/ufshcd.c)9
-rw-r--r--drivers/ufs/core/ufshpb.c (renamed from drivers/scsi/ufs/ufshpb.c)8
-rw-r--r--drivers/ufs/core/ufshpb.h (renamed from drivers/scsi/ufs/ufshpb.h)0
-rw-r--r--drivers/ufs/host/Kconfig (renamed from drivers/scsi/ufs/Kconfig)75
-rw-r--r--drivers/ufs/host/Makefile (renamed from drivers/scsi/ufs/Makefile)12
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c (renamed from drivers/scsi/ufs/cdns-pltfrm.c)0
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c (renamed from drivers/scsi/ufs/tc-dwc-g210-pci.c)2
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pltfrm.c (renamed from drivers/scsi/ufs/tc-dwc-g210-pltfrm.c)0
-rw-r--r--drivers/ufs/host/tc-dwc-g210.c (renamed from drivers/scsi/ufs/tc-dwc-g210.c)4
-rw-r--r--drivers/ufs/host/tc-dwc-g210.h (renamed from drivers/scsi/ufs/tc-dwc-g210.h)0
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c (renamed from drivers/scsi/ufs/ti-j721e-ufs.c)0
-rw-r--r--drivers/ufs/host/ufs-exynos.c (renamed from drivers/scsi/ufs/ufs-exynos.c)6
-rw-r--r--drivers/ufs/host/ufs-exynos.h (renamed from drivers/scsi/ufs/ufs-exynos.h)0
-rw-r--r--drivers/ufs/host/ufs-hisi.c (renamed from drivers/scsi/ufs/ufs-hisi.c)8
-rw-r--r--drivers/ufs/host/ufs-hisi.h (renamed from drivers/scsi/ufs/ufs-hisi.h)0
-rw-r--r--drivers/ufs/host/ufs-mediatek-trace.h (renamed from drivers/scsi/ufs/ufs-mediatek-trace.h)2
-rw-r--r--drivers/ufs/host/ufs-mediatek.c (renamed from drivers/scsi/ufs/ufs-mediatek.c)6
-rw-r--r--drivers/ufs/host/ufs-mediatek.h (renamed from drivers/scsi/ufs/ufs-mediatek.h)0
-rw-r--r--drivers/ufs/host/ufs-qcom-ice.c (renamed from drivers/scsi/ufs/ufs-qcom-ice.c)0
-rw-r--r--drivers/ufs/host/ufs-qcom.c (renamed from drivers/scsi/ufs/ufs-qcom.c)8
-rw-r--r--drivers/ufs/host/ufs-qcom.h (renamed from drivers/scsi/ufs/ufs-qcom.h)2
-rw-r--r--drivers/ufs/host/ufshcd-dwc.c (renamed from drivers/scsi/ufs/ufshcd-dwc.c)4
-rw-r--r--drivers/ufs/host/ufshcd-dwc.h (renamed from drivers/scsi/ufs/ufshcd-dwc.h)2
-rw-r--r--drivers/ufs/host/ufshcd-pci.c (renamed from drivers/scsi/ufs/ufshcd-pci.c)2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c (renamed from drivers/scsi/ufs/ufshcd-pltfrm.c)4
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h (renamed from drivers/scsi/ufs/ufshcd-pltfrm.h)2
-rw-r--r--drivers/ufs/host/ufshci-dwc.h (renamed from drivers/scsi/ufs/ufshci-dwc.h)0
-rw-r--r--drivers/uio/uio_dfl.c2
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c6
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c4
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c47
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.h9
-rw-r--r--drivers/usb/class/cdc-acm.h8
-rw-r--r--drivers/usb/core/devices.c47
-rw-r--r--drivers/usb/core/driver.c25
-rw-r--r--drivers/usb/core/hcd-pci.c5
-rw-r--r--drivers/usb/core/hcd.c29
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/usb-acpi.c7
-rw-r--r--drivers/usb/dwc2/core.c9
-rw-r--r--drivers/usb/dwc2/core.h5
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/params.c50
-rw-r--r--drivers/usb/dwc3/Kconfig9
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c81
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/drd.c50
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c332
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c17
-rw-r--r--drivers/usb/dwc3/ep0.c14
-rw-r--r--drivers/usb/dwc3/gadget.c190
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_acm.c10
-rw-r--r--drivers/usb/gadget/function/f_uvc.c5
-rw-r--r--drivers/usb/gadget/function/u_audio.c4
-rw-r--r--drivers/usb/gadget/function/u_uvc.h1
-rw-r--r--drivers/usb/gadget/function/uvc.h1
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c189
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h120
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c30
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c17
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c4
-rw-r--r--drivers/usb/gadget/udc/core.c289
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c14
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c16
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c37
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h7
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/ehci-omap.c5
-rw-r--r--drivers/usb/host/ehci-platform.c1
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c12
-rw-r--r--drivers/usb/host/fhci-hcd.c3
-rw-r--r--drivers/usb/host/fotg210-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c11
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/max3421-hcd.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/ohci-omap.c18
-rw-r--r--drivers/usb/host/ohci-platform.c1
-rw-r--r--drivers/usb/host/ohci-ppc-of.c3
-rw-r--r--drivers/usb/host/ohci-pxa27x.c3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c16
-rw-r--r--drivers/usb/host/r8a66597-hcd.c3
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c23
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c46
-rw-r--r--drivers/usb/host/xhci-ring.c144
-rw-r--r--drivers/usb/host/xhci.c175
-rw-r--r--drivers/usb/host/xhci.h30
-rw-r--r--drivers/usb/isp1760/isp1760-core.c8
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/misc/ftdi-elan.c15
-rw-r--r--drivers/usb/misc/lvstest.c2
-rw-r--r--drivers/usb/musb/mediatek.c73
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/phy/phy-omap-otg.c4
-rw-r--r--drivers/usb/serial/ark3116.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/whiteheat.c4
-rw-r--r--drivers/usb/storage/alauda.c4
-rw-r--r--drivers/usb/storage/isd200.c8
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/shuttle_usbat.c28
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/typec/bus.c2
-rw-r--r--drivers/usb/typec/mux.c271
-rw-r--r--drivers/usb/typec/mux.h12
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/fsa4480.c218
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c29
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c8
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c4
-rw-r--r--drivers/usb/typec/tipd/core.c32
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c85
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h6
-rw-r--r--drivers/usb/usbip/stub_dev.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c23
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c489
-rw-r--r--drivers/vdpa/vdpa.c286
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c105
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c169
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c3
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c161
-rw-r--r--drivers/vhost/iotlb.c23
-rw-r--r--drivers/vhost/net.c11
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/test.c14
-rw-r--r--drivers/vhost/vdpa.c271
-rw-r--r--drivers/vhost/vhost.c45
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/vhost/vsock.c7
-rw-r--r--drivers/video/console/sticon.c5
-rw-r--r--drivers/video/console/sticore.c32
-rw-r--r--drivers/video/fbdev/pxa3xx-regs.h180
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/sticore.h3
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/virt/fsl_hypervisor.c3
-rw-r--r--drivers/virtio/virtio.c32
-rw-r--r--drivers/virtio/virtio_balloon.c12
-rw-r--r--drivers/virtio/virtio_mmio.c27
-rw-r--r--drivers/virtio/virtio_pci_common.c15
-rw-r--r--drivers/virtio/virtio_pci_common.h10
-rw-r--r--drivers/virtio/virtio_pci_legacy.c11
-rw-r--r--drivers/virtio/virtio_pci_modern.c14
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c6
-rw-r--r--drivers/virtio/virtio_ring.c55
-rw-r--r--drivers/virtio/virtio_vdpa.c12
-rw-r--r--drivers/visorbus/Kconfig15
-rw-r--r--drivers/visorbus/Makefile10
-rw-r--r--drivers/visorbus/controlvmchannel.h650
-rw-r--r--drivers/visorbus/vbuschannel.h95
-rw-r--r--drivers/visorbus/visorbus_main.c1234
-rw-r--r--drivers/visorbus/visorbus_private.h48
-rw-r--r--drivers/visorbus/visorchannel.c434
-rw-r--r--drivers/visorbus/visorchipset.c1691
-rw-r--r--drivers/vme/Kconfig2
-rw-r--r--drivers/w1/masters/ds2490.c124
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/gxp-wdt.c174
-rw-r--r--drivers/watchdog/sa1100_wdt.c88
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev-dmabuf.c2
-rw-r--r--drivers/xen/grant-table.c14
-rw-r--r--drivers/xen/pvcalls-front.c6
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c8
1124 files changed, 33724 insertions, 30829 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8d6cd5d08722..b6a172d32a7d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -107,6 +107,8 @@ source "drivers/usb/Kconfig"
source "drivers/mmc/Kconfig"
+source "drivers/ufs/Kconfig"
+
source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
@@ -225,8 +227,6 @@ source "drivers/mux/Kconfig"
source "drivers/opp/Kconfig"
-source "drivers/visorbus/Kconfig"
-
source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
@@ -239,4 +239,6 @@ source "drivers/most/Kconfig"
source "drivers/peci/Kconfig"
+source "drivers/hte/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f735c4955143..9a30842b22c5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
+obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
@@ -181,10 +182,10 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
-obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_PECI) += peci/
+obj-$(CONFIG_HTE) += hte/
diff --git a/drivers/accessibility/speakup/fakekey.c b/drivers/accessibility/speakup/fakekey.c
index cd029968462f..868c47b2a59b 100644
--- a/drivers/accessibility/speakup/fakekey.c
+++ b/drivers/accessibility/speakup/fakekey.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/* fakekey.c
- * Functions for simulating keypresses.
+ * Functions for simulating key presses.
*
* Copyright (C) 2010 the Speakup Team
*/
@@ -78,7 +78,7 @@ void speakup_fake_down_arrow(void)
}
/*
- * Are we handling a simulated keypress on the current CPU?
+ * Are we handling a simulated key press on the current CPU?
* Returns a boolean.
*/
bool speakup_fake_key_pressed(void)
diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c
index 53580bdc5baa..3418ea31d28f 100644
--- a/drivers/accessibility/speakup/serialio.c
+++ b/drivers/accessibility/speakup/serialio.c
@@ -59,7 +59,7 @@ const struct old_serial_port *spk_serial_init(int index)
}
ser = rs_table + index;
- /* Divisor, bytesize and parity */
+ /* Divisor, byte size and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
#if defined(__powerpc__) || defined(__alpha__)
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index 023172ca22ef..a55b60754eb1 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
* This driver is for the Aicom Acent PC internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_acntsa.c b/drivers/accessibility/speakup/speakup_acntsa.c
index 3a863dc61286..2697c51ed6b5 100644
--- a/drivers/accessibility/speakup/speakup_acntsa.c
+++ b/drivers/accessibility/speakup/speakup_acntsa.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
index cd63581b2e99..c84a7e0864b7 100644
--- a/drivers/accessibility/speakup/speakup_apollo.c
+++ b/drivers/accessibility/speakup/speakup_apollo.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include <linux/jiffies.h>
diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c
index a0c3b8ae17a1..4d16d60db9b2 100644
--- a/drivers/accessibility/speakup/speakup_audptr.c
+++ b/drivers/accessibility/speakup/speakup_audptr.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_bns.c b/drivers/accessibility/speakup/speakup_bns.c
index 76dfa3f7c058..b8103eb117b8 100644
--- a/drivers/accessibility/speakup/speakup_bns.c
+++ b/drivers/accessibility/speakup/speakup_bns.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
index 092cfd08a9e1..eaebf62300a4 100644
--- a/drivers/accessibility/speakup/speakup_decext.c
+++ b/drivers/accessibility/speakup/speakup_decext.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include <linux/jiffies.h>
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 78ca4987e619..2a7e8d727904 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include <linux/unistd.h>
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index a9dd5c45d237..6f01e010aaf4 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* package it's not a general device driver.
* This driver is for the RC Systems DoubleTalk PC internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c
index 63c2f2943282..34f11cd47073 100644
--- a/drivers/accessibility/speakup/speakup_dummy.c
+++ b/drivers/accessibility/speakup/speakup_dummy.c
@@ -8,7 +8,7 @@
* Copyright (C) 2003 David Borowski.
* Copyright (C) 2007 Samuel Thibault.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 1618be87bff1..f61b62f1ea4d 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* package it's not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c
index 3e59b387d0c4..f885cfaa27c8 100644
--- a/drivers/accessibility/speakup/speakup_ltlk.c
+++ b/drivers/accessibility/speakup/speakup_ltlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "speakup.h"
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 19824e7006fe..99f1d4ac426a 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2003 Kirk Reiser.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
@@ -397,6 +397,7 @@ static int softsynth_probe(struct spk_synth *synth)
synthu_device.name = "softsynthu";
synthu_device.fops = &softsynthu_fops;
if (misc_register(&synthu_device)) {
+ misc_deregister(&synth_device);
pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n");
return -ENODEV;
}
diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c
index bd3d8dc300ff..5e3bb3aa98b6 100644
--- a/drivers/accessibility/speakup/speakup_spkout.c
+++ b/drivers/accessibility/speakup/speakup_spkout.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_txprt.c b/drivers/accessibility/speakup/speakup_txprt.c
index a7326f226a5e..9e781347f7eb 100644
--- a/drivers/accessibility/speakup/speakup_txprt.c
+++ b/drivers/accessibility/speakup/speakup_txprt.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3a2adeaef5ce..0e3ed5eb367b 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -98,31 +98,11 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(_dev);
- old = dev->driver_override;
- if (strlen(driver_override)) {
- dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- dev->driver_override = NULL;
- }
- device_unlock(_dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(_dev, &dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f3b639e89dd8..362c0deb65f1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -133,18 +133,45 @@ static int binder_set_stop_on_user_error(const char *val,
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
param_get_int, &binder_stop_on_user_error, 0644);
-#define binder_debug(mask, x...) \
- do { \
- if (binder_debug_mask & mask) \
- pr_info_ratelimited(x); \
- } while (0)
+static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (binder_debug_mask & mask) {
+ va_start(args, format);
+ vaf.va = &args;
+ vaf.fmt = format;
+ pr_info_ratelimited("%pV", &vaf);
+ va_end(args);
+ }
+}
+
+#define binder_txn_error(x...) \
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
+
+static __printf(1, 2) void binder_user_error(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
+ va_start(args, format);
+ vaf.va = &args;
+ vaf.fmt = format;
+ pr_info_ratelimited("%pV", &vaf);
+ va_end(args);
+ }
+
+ if (binder_stop_on_user_error)
+ binder_stop_on_user_error = 2;
+}
-#define binder_user_error(x...) \
+#define binder_set_extended_error(ee, _id, _command, _param) \
do { \
- if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
- pr_info_ratelimited(x); \
- if (binder_stop_on_user_error) \
- binder_stop_on_user_error = 2; \
+ (ee)->id = _id; \
+ (ee)->command = _command; \
+ (ee)->param = _param; \
} while (0)
#define to_flat_binder_object(hdr) \
@@ -1481,6 +1508,8 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
fput(fixup->file);
+ if (fixup->target_fd >= 0)
+ put_unused_fd(fixup->target_fd);
list_del(&fixup->fixup_entry);
kfree(fixup);
}
@@ -1855,8 +1884,10 @@ static void binder_deferred_fd_close(int fd)
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
- close_fd_get_file(fd, &twcb->file);
+ twcb->file = close_fd_get_file(fd);
if (twcb->file) {
+ // pin it until binder_do_fd_close(); see comments there
+ get_file(twcb->file);
filp_close(twcb->file, current->files);
task_work_add(current, &twcb->twork, TWA_RESUME);
} else {
@@ -2220,6 +2251,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
}
fixup->file = file;
fixup->offset = fd_offset;
+ fixup->target_fd = -1;
trace_binder_transaction_fd_send(t, fd, fixup->offset);
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
@@ -2705,6 +2737,24 @@ static struct binder_node *binder_get_node_refs_for_txn(
return target_node;
}
+static void binder_set_txn_from_error(struct binder_transaction *t, int id,
+ uint32_t command, int32_t param)
+{
+ struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
+
+ if (!from) {
+ /* annotation for sparse */
+ __release(&from->proc->inner_lock);
+ return;
+ }
+
+ /* don't override existing errors */
+ if (from->ee.command == BR_OK)
+ binder_set_extended_error(&from->ee, id, command, param);
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2750,6 +2800,10 @@ static void binder_transaction(struct binder_proc *proc,
e->offsets_size = tr->offsets_size;
strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
+ binder_inner_proc_lock(proc);
+ binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
+ binder_inner_proc_unlock(proc);
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -2785,6 +2839,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread == NULL) {
/* annotation for sparse */
__release(&target_thread->proc->inner_lock);
+ binder_txn_error("%d:%d reply target not found\n",
+ thread->pid, proc->pid);
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -2850,6 +2906,8 @@ static void binder_transaction(struct binder_proc *proc,
}
}
if (!target_node) {
+ binder_txn_error("%d:%d cannot find target node\n",
+ thread->pid, proc->pid);
/*
* return_error is set above
*/
@@ -2859,6 +2917,8 @@ static void binder_transaction(struct binder_proc *proc,
}
e->to_node = target_node->debug_id;
if (WARN_ON(proc == target_proc)) {
+ binder_txn_error("%d:%d self transactions not allowed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -2866,6 +2926,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (security_binder_transaction(proc->cred,
target_proc->cred) < 0) {
+ binder_txn_error("%d:%d transaction credentials failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
@@ -2937,6 +2999,8 @@ static void binder_transaction(struct binder_proc *proc,
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
@@ -2948,6 +3012,8 @@ static void binder_transaction(struct binder_proc *proc,
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
+ binder_txn_error("%d:%d cannot allocate work for transaction\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
@@ -2994,6 +3060,8 @@ static void binder_transaction(struct binder_proc *proc,
security_cred_getsecid(proc->cred, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
+ binder_txn_error("%d:%d failed to get security context\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3002,7 +3070,8 @@ static void binder_transaction(struct binder_proc *proc,
added_size = ALIGN(secctx_sz, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
- /* integer overflow of extra_buffers_size */
+ binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -3016,9 +3085,15 @@ static void binder_transaction(struct binder_proc *proc,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY), current->tgid);
if (IS_ERR(t->buffer)) {
- /*
- * -ESRCH indicates VMA cleared. The target is dying.
- */
+ char *s;
+
+ ret = PTR_ERR(t->buffer);
+ s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
+ : (ret == -ENOSPC) ? ": no space left"
+ : (ret == -ENOMEM) ? ": memory allocation failed"
+ : "";
+ binder_txn_error("cannot allocate buffer%s", s);
+
return_error_param = PTR_ERR(t->buffer);
return_error = return_error_param == -ESRCH ?
BR_DEAD_REPLY : BR_FAILED_REPLY;
@@ -3101,6 +3176,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
buffer_offset,
sizeof(object_offset))) {
+ binder_txn_error("%d:%d copy offset from buffer failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -3159,6 +3236,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate binder failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3176,6 +3255,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate handle failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3196,6 +3277,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate fd failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3265,6 +3348,8 @@ static void binder_transaction(struct binder_proc *proc,
object_offset,
fda, sizeof(*fda));
if (ret) {
+ binder_txn_error("%d:%d translate fd array failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
@@ -3292,6 +3377,8 @@ static void binder_transaction(struct binder_proc *proc,
(const void __user *)(uintptr_t)bp->buffer,
bp->length);
if (ret) {
+ binder_txn_error("%d:%d deferred copy failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3315,6 +3402,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
bp, sizeof(*bp))) {
+ binder_txn_error("%d:%d failed to fixup parent\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3422,6 +3511,8 @@ static void binder_transaction(struct binder_proc *proc,
return;
err_dead_proc_or_thread:
+ binder_txn_error("%d:%d dead process or thread\n",
+ thread->pid, proc->pid);
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
@@ -3457,21 +3548,26 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
- if (target_thread)
- binder_thread_dec_tmpref(target_thread);
- if (target_proc)
- binder_proc_dec_tmpref(target_proc);
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
- proc->pid, thread->pid, return_error, return_error_param,
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, reply ? "reply" :
+ (tr->flags & TF_ONE_WAY ? "async" : "call"),
+ target_proc ? target_proc->pid : 0,
+ target_thread ? target_thread->pid : 0,
+ t_debug_id, return_error, return_error_param,
(u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+
{
struct binder_transaction_log_entry *fe;
@@ -3491,10 +3587,16 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
+ binder_set_txn_from_error(in_reply_to, t_debug_id,
+ return_error, return_error_param);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
+ binder_inner_proc_lock(proc);
+ binder_set_extended_error(&thread->ee, t_debug_id,
+ return_error, return_error_param);
+ binder_inner_proc_unlock(proc);
thread->return_error.cmd = return_error;
binder_enqueue_thread_work(thread, &thread->return_error.work);
}
@@ -3984,7 +4086,7 @@ static int binder_thread_write(struct binder_proc *proc,
} break;
default:
- pr_err("%d:%d unknown command %d\n",
+ pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
@@ -4075,10 +4177,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
* Now that we are in the context of the transaction target
* process, we can allocate and install fds. Process the
* list of fds to translate and fixup the buffer with the
- * new fds.
+ * new fds first and only then install the files.
*
- * If we fail to allocate an fd, then free the resources by
- * fput'ing files that have not been processed and ksys_close'ing
+ * If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
@@ -4095,41 +4196,31 @@ static int binder_apply_fd_fixups(struct binder_proc *proc,
"failed fd fixup txn %d fd %d\n",
t->debug_id, fd);
ret = -ENOMEM;
- break;
+ goto err;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
"fd fixup txn %d fd %d\n",
t->debug_id, fd);
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
- fd_install(fd, fixup->file);
- fixup->file = NULL;
+ fixup->target_fd = fd;
if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
fixup->offset, &fd,
sizeof(u32))) {
ret = -EINVAL;
- break;
+ goto err;
}
}
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
- if (fixup->file) {
- fput(fixup->file);
- } else if (ret) {
- u32 fd;
- int err;
-
- err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
- t->buffer,
- fixup->offset,
- sizeof(fd));
- WARN_ON(err);
- if (!err)
- binder_deferred_fd_close(fd);
- }
+ fd_install(fixup->target_fd, fixup->file);
list_del(&fixup->fixup_entry);
kfree(fixup);
}
return ret;
+
+err:
+ binder_free_txn_fixups(t);
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -4490,7 +4581,7 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
@@ -4632,6 +4723,7 @@ static struct binder_thread *binder_get_thread_ilocked(
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
+ thread->ee.command = BR_OK;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
}
@@ -5070,6 +5162,22 @@ static int binder_ioctl_get_freezer_info(
return 0;
}
+static int binder_ioctl_get_extended_error(struct binder_thread *thread,
+ void __user *ubuf)
+{
+ struct binder_extended_error ee;
+
+ binder_inner_proc_lock(thread->proc);
+ ee = thread->ee;
+ binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
+ binder_inner_proc_unlock(thread->proc);
+
+ if (copy_to_user(ubuf, &ee, sizeof(ee)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -5278,6 +5386,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_GET_EXTENDED_ERROR:
+ ret = binder_ioctl_get_extended_error(thread, ubuf);
+ if (ret < 0)
+ goto err;
+ break;
default:
ret = -EINVAL;
goto err;
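
The binder.c changes above add per-thread extended error state: binder_transaction() resets thread->ee at the start of each transaction, records the failing command and parameter at the individual error paths, and a new BINDER_GET_EXTENDED_ERROR ioctl hands that information to userspace (the state is consumed and reset to BR_OK on read). A minimal userspace sketch, assuming the structure and ioctl number are exported through the binder uapi header:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>    /* assumed location of the uapi definitions */

    /* Ask why the last transaction issued on this thread failed. */
    static void report_last_txn_error(int binder_fd)
    {
            struct binder_extended_error ee;

            if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) < 0) {
                    perror("BINDER_GET_EXTENDED_ERROR");
                    return;
            }

            /* ee.command stays BR_OK when no error has been recorded. */
            printf("txn %d: command %u, param %d\n",
                   (int)ee.id, (unsigned int)ee.command, (int)ee.param);
    }
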
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2ac1008a5f39..5649a0371a1f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1175,14 +1175,11 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc,
unsigned long size;
struct page *page;
pgoff_t pgoff;
- void *kptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
- memset(kptr, 0, size);
- kunmap(page);
+ memset_page(page, pgoff, 0, size);
bytes -= size;
buffer_offset += size;
}
@@ -1220,9 +1217,9 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
+ kptr = kmap_local_page(page) + pgoff;
ret = copy_from_user(kptr, from, size);
- kunmap(page);
+ kunmap_local(kptr);
if (ret)
return bytes - size + ret;
bytes -= size;
@@ -1247,23 +1244,14 @@ static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
unsigned long size;
struct page *page;
pgoff_t pgoff;
- void *tmpptr;
- void *base_ptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- base_ptr = kmap_atomic(page);
- tmpptr = base_ptr + pgoff;
if (to_buffer)
- memcpy(tmpptr, ptr, size);
+ memcpy_to_page(page, pgoff, ptr, size);
else
- memcpy(ptr, tmpptr, size);
- /*
- * kunmap_atomic() takes care of flushing the cache
- * if this device has VIVT cache arch
- */
- kunmap_atomic(base_ptr);
+ memcpy_from_page(ptr, page, pgoff, size);
bytes -= size;
pgoff = 0;
ptr = ptr + size;
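
The binder_alloc.c hunks replace open-coded kmap()/kmap_atomic() sections with the highmem helpers memset_page(), memcpy_to_page(), memcpy_from_page() and kmap_local_page()/kunmap_local(), which keep the mapping CPU-local instead of consuming a global kmap slot or disabling preemption. As a reference point, memcpy_to_page() behaves roughly like the sketch below (based on the generic linux/highmem.h helper; the real one also bounds-checks offset + len against PAGE_SIZE):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void memcpy_to_page_sketch(struct page *page, size_t offset,
                                      const char *from, size_t len)
    {
            char *to = kmap_local_page(page);   /* CPU-local mapping; preemption stays enabled */

            memcpy(to + offset, from, len);
            flush_dcache_page(page);            /* keeps VIVT caches coherent, as the removed comment noted */
            kunmap_local(to);                   /* unmap in reverse nesting order */
    }
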
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index d6b6b8cb7346..8dc0bccf8513 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -480,6 +480,8 @@ struct binder_proc {
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
* (protected by @proc->inner_lock)
+ * @ee: extended error information from this thread
+ * (protected by @proc->inner_lock)
* @wait: wait queue for thread work
* @stats: per-thread statistics
* (atomics, no lock needed)
@@ -504,6 +506,7 @@ struct binder_thread {
bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
+ struct binder_extended_error ee;
wait_queue_head_t wait;
struct binder_stats stats;
atomic_t tmp_ref;
@@ -515,6 +518,7 @@ struct binder_thread {
* @fixup_entry: list entry
* @file: struct file to be associated with new fd
* @offset: offset in buffer data to this fixup
+ * @target_fd: fd to use by the target to install @file
*
* List element for fd fixups in a transaction. Since file
* descriptors need to be allocated in the context of the
@@ -525,6 +529,7 @@ struct binder_txn_fd_fixup {
struct list_head fixup_entry;
struct file *file;
size_t offset;
+ int target_fd;
};
struct binder_transaction {
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e3605cdd4335..6c5e94f6cb3a 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -60,6 +60,7 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
+ bool extended_error;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -75,6 +76,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
+ .extended_error = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -615,6 +617,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
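
The matching binderfs change publishes the capability next to oneway_spam_detection: a read-only extended_error file under the binderfs features directory (on a typical Android mount that would be /dev/binderfs/features/extended_error, though the mount point is configuration-dependent) reports 1, letting userspace detect BINDER_GET_EXTENDED_ERROR support without issuing the ioctl.
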
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 2448441571ed..400e65190904 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -25,7 +25,6 @@
#include <linux/gpio/consumer.h>
#include <scsi/scsi_host.h>
-#include <mach/palmld.h>
#define DRV_NAME "pata_palmld"
@@ -63,7 +62,7 @@ static int palmld_pata_probe(struct platform_device *pdev)
return -ENOMEM;
/* remap drive's physical memory address */
- mem = devm_ioremap(dev, PALMLD_IDE_PHYS, 0x1000);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (!mem)
return -ENOMEM;
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 02f7f1358e86..83217d243c25 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
+obj-$(CONFIG_ACPI) += physical_location.o
obj-y += test/
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index f73b836047cf..579c851a2bd7 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -19,6 +19,9 @@
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_pressure.h>
+
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
@@ -195,6 +198,8 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
th_pressure = max_capacity - capacity;
+ trace_thermal_pressure_update(cpu, th_pressure);
+
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
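
The arch_topology.c change emits a thermal_pressure_update trace event each time topology_update_thermal_pressure() recomputes the pressure for a CPU mask. Assuming the usual tracefs layout and that the event group takes its name from the thermal_pressure header, it can be observed with something like echo 1 > /sys/kernel/tracing/events/thermal_pressure/thermal_pressure_update/enable and then reading /sys/kernel/tracing/trace.
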
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2882af26392a..ab71403d102f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -159,6 +159,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void deferred_probe_extend_timeout(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 97936ec49bde..7ca47e5b3c1f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
- goto out_unregister;
+ goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_del_list:
+ klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2eede2ec3d64..7cd789c4985d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -32,6 +32,7 @@
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
+#include "physical_location.h"
#include "power/power.h"
#ifdef CONFIG_SYSFS_DEPRECATED
@@ -2649,8 +2650,17 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_waiting_for_supplier;
}
+ if (dev_add_physical_location(dev)) {
+ error = device_add_group(dev,
+ &dev_attr_physical_location_group);
+ if (error)
+ goto err_remove_dev_removable;
+ }
+
return 0;
+ err_remove_dev_removable:
+ device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
@@ -2672,6 +2682,11 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ if (dev->physical_location) {
+ device_remove_group(dev, &dev_attr_physical_location_group);
+ kfree(dev->physical_location);
+ }
+
device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 94b7ac9bf459..11b0fb6414d3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -60,6 +60,7 @@ static bool initcalls_done;
/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN 256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+static bool async_probe_default;
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -257,7 +258,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -274,10 +274,10 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
* @dev: device to check
*
* Return:
- * -ENODEV if initcalls have completed and modules are disabled.
- * -ETIMEDOUT if the deferred probe timeout was set and has expired
- * and modules are enabled.
- * -EPROBE_DEFER in other cases.
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
*
* Drivers or subsystems can opt-in to calling this function instead of directly
* returning -EPROBE_DEFER.
@@ -312,10 +312,23 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
- wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+void deferred_probe_extend_timeout(void)
+{
+ /*
+ * If the work hasn't been queued yet or if the work expired, don't
+ * start a new one.
+ */
+ if (cancel_delayed_work(&deferred_probe_timeout_work)) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ driver_deferred_probe_timeout * HZ);
+ pr_debug("Extended deferred probe timeout by %d secs\n",
+ driver_deferred_probe_timeout);
+ }
+}
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -718,9 +731,6 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
- /* wait for probe timeout */
- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -799,7 +809,11 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
- return parse_option_str(async_probe_drv_names, drv_name);
+ bool async_drv;
+
+ async_drv = parse_option_str(async_probe_drv_names, drv_name);
+
+ return (async_probe_default != async_drv);
}
/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
@@ -809,6 +823,8 @@ static int __init save_async_options(char *buf)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ async_probe_default = parse_option_str(async_probe_drv_names, "*");
+
return 1;
}
__setup("driver_async_probe=", save_async_options);
@@ -943,6 +959,7 @@ out_unlock:
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
+ bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -981,7 +998,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule_dev(__device_attach_async_helper, dev);
+ async = true;
} else {
pm_request_idle(dev);
}
@@ -991,6 +1008,8 @@ static int __device_attach(struct device *dev, bool allow_async)
}
out_unlock:
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
@@ -1084,6 +1103,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
__device_driver_lock(dev, dev->parent);
drv = dev->p->async_driver;
+ dev->p->async_driver = NULL;
ret = driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
@@ -1130,7 +1150,7 @@ static int __driver_attach(struct device *dev, void *data)
*/
dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
device_lock(dev);
- if (!dev->driver) {
+ if (!dev->driver && !dev->p->async_driver) {
get_device(dev);
dev->p->async_driver = drv;
async_schedule_dev(__driver_attach_async_helper, dev);
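
Two behavioural notes on the dd.c changes above. First, a "*" entry in the driver_async_probe= kernel parameter now inverts the default: cmdline_requested_async_probing() returns true when a driver's presence in the list differs from the wildcard default, so driver_async_probe=* asynchronously probes every driver that does not force synchronous probing, while driver_async_probe=*,foo_driver (the name is only illustrative) does the same for everything except foo_driver; without the wildcard the list keeps its old opt-in meaning. Second, the async probe helper is now scheduled only after the device lock is dropped (async_schedule_dev() can fall back to running the callback synchronously, and the helper itself takes the device lock), and __driver_attach() skips scheduling when an async probe is already pending so dev->p->async_driver is not overwritten.
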
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 8c0d33e182fd..15a75afe6b84 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -31,6 +31,75 @@ static struct device *next_device(struct klist_iter *i)
}
/**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ * The contents will be freed and hold newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is only for sysfs
+ * interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len)
+{
+ const char *new, *old;
+ char *cp;
+
+ if (!override || !s)
+ return -EINVAL;
+
+ /*
+ * The stored value will be used in sysfs show callback (sysfs_emit()),
+ * which has a length limit of PAGE_SIZE and adds a trailing newline.
+ * Thus we can store one character less to avoid truncation during sysfs
+ * show.
+ */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (!len) {
+ /* Empty string passed - clear override */
+ device_lock(dev);
+ old = *override;
+ *override = NULL;
+ device_unlock(dev);
+ kfree(old);
+
+ return 0;
+ }
+
+ cp = strnchr(s, len, '\n');
+ if (cp)
+ len = cp - s;
+
+ new = kstrndup(s, len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = *override;
+ if (cp != s) {
+ *override = new;
+ } else {
+ /* "\n" passed - clear override */
+ kfree(new);
+ *override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
+
+/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.
* @start: Device to begin with
@@ -177,6 +246,7 @@ int driver_register(struct device_driver *drv)
return ret;
}
kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+ deferred_probe_extend_timeout();
return ret;
}
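
driver_set_override() gives buses a single, locked implementation of the driver_override parsing they previously open-coded; the amba hunk earlier in this diff shows the intended conversion, and driver_register() now also nudges the deferred-probe timeout along via deferred_probe_extend_timeout() whenever a new driver appears. A minimal sketch of a bus's sysfs store callback built on the helper (foo_device and to_foo_device() are hypothetical stand-ins for a real bus type):

    static ssize_t driver_override_store(struct device *_dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
    {
            struct foo_device *fdev = to_foo_device(_dev);  /* hypothetical bus device */
            int ret;

            /*
             * Stores buf (minus any trailing newline) in fdev->driver_override,
             * frees the previous value, and clears the override when an empty
             * string or "\n" is written.
             */
            ret = driver_set_override(_dev, &fdev->driver_override, buf, count);
            if (ret)
                    return ret;

            return count;
    }
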
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 38f3b66bf52b..5166b323a0f8 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -29,6 +29,9 @@ if FW_LOADER
config FW_LOADER_PAGED_BUF
bool
+config FW_LOADER_SYSFS
+ bool
+
config EXTRA_FIRMWARE
string "Build named firmware blobs into the kernel binary"
help
@@ -72,6 +75,7 @@ config EXTRA_FIRMWARE_DIR
config FW_LOADER_USER_HELPER
bool "Enable the firmware sysfs fallback mechanism"
+ select FW_LOADER_SYSFS
select FW_LOADER_PAGED_BUF
help
This option enables a sysfs loading facility to enable firmware
@@ -159,21 +163,34 @@ config FW_LOADER_USER_HELPER_FALLBACK
config FW_LOADER_COMPRESS
bool "Enable compressed firmware support"
- select FW_LOADER_PAGED_BUF
- select XZ_DEC
help
This option enables the support for loading compressed firmware
files. The caller of firmware API receives the decompressed file
content. The compressed file is loaded as a fallback, only after
loading the raw file failed at first.
- Currently only XZ-compressed files are supported, and they have to
- be compressed with either none or crc32 integrity check type (pass
- "-C crc32" option to xz command).
-
Compressed firmware support does not apply to firmware images
that are built into the kernel image (CONFIG_EXTRA_FIRMWARE).
+if FW_LOADER_COMPRESS
+config FW_LOADER_COMPRESS_XZ
+ bool "Enable XZ-compressed firmware support"
+ select FW_LOADER_PAGED_BUF
+ select XZ_DEC
+ default y
+ help
+ This option adds the support for XZ-compressed files.
+ The files have to be compressed with either none or crc32
+ integrity check type (pass "-C crc32" option to xz command).
+
+config FW_LOADER_COMPRESS_ZSTD
+ bool "Enable ZSTD-compressed firmware support"
+ select ZSTD_DECOMPRESS
+ help
+ This option adds the support for ZSTD-compressed files.
+
+endif # FW_LOADER_COMPRESS
+
config FW_CACHE
bool "Enable firmware caching during suspend"
depends on PM_SLEEP
@@ -186,5 +203,19 @@ config FW_CACHE
If unsure, say Y.
+config FW_UPLOAD
+ bool "Enable users to initiate firmware updates using sysfs"
+ select FW_LOADER_SYSFS
+ select FW_LOADER_PAGED_BUF
+ help
+ Enabling this option will allow device drivers to expose a persistent
+ sysfs interface that allows firmware updates to be initiated from
+ userspace. For example, FPGA based PCIe cards load firmware and FPGA
+ images from local FLASH when the card boots. The images in FLASH may
+ be updated with new images provided by the user. Enable this device
+ to support cards that rely on user-initiated updates for firmware files.
+
+ If unsure, say N.
+
endif # FW_LOADER
endmenu
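
With the split above, compressed firmware support is chosen per format: FW_LOADER_COMPRESS_XZ keeps the existing requirement that blobs carry no integrity check or crc32 (for example xz -C crc32 firmware.bin), while FW_LOADER_COMPRESS_ZSTD accepts ordinary zstd output as long as the frame header records the content size, which the zstd tool normally does for regular files. In either case the loader still tries the raw file first and only then falls back to the compressed one (the .xz/.zst suffix handling lives in main.c, not in this hunk). The new FW_UPLOAD option pulls in the sysfs.c/sysfs_upload.c files listed in the diffstat so that drivers can offer the user-initiated update interface the help text describes.
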
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index e87843408fe6..60d19f9e0ddc 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -6,5 +6,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
firmware_class-objs := main.o
firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
+firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o
+firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o
obj-y += builtin/
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 4afb0e9312c0..bf68e3947814 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -3,12 +3,9 @@
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/security.h>
-#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include "fallback.h"
@@ -18,22 +15,6 @@
* firmware fallback mechanism
*/
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
-
-extern struct firmware_fallback_config fw_fallback_config;
-
-/* These getters are vetted to use int properly */
-static inline int __firmware_loading_timeout(void)
-{
- return fw_fallback_config.loading_timeout;
-}
-
-/* These setters are vetted to use int properly */
-static void __fw_fallback_set_timeout(int timeout)
-{
- fw_fallback_config.loading_timeout = timeout;
-}
-
/*
* use small loading timeout for caching devices' firmware because all these
* firmware images have been loaded successfully at lease once, also system is
@@ -58,52 +39,11 @@ static long firmware_loading_timeout(void)
__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}
-static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_DONE);
-}
-
-static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_LOADING);
-}
-
static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
return __fw_state_wait_common(fw_priv, timeout);
}
-struct fw_sysfs {
- bool nowait;
- struct device dev;
- struct fw_priv *fw_priv;
- struct firmware *fw;
-};
-
-static struct fw_sysfs *to_fw_sysfs(struct device *dev)
-{
- return container_of(dev, struct fw_sysfs, dev);
-}
-
-static void __fw_load_abort(struct fw_priv *fw_priv)
-{
- /*
- * There is a small window in which user can write to 'loading'
- * between loading done/aborted and disappearance of 'loading'
- */
- if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
- return;
-
- fw_state_aborted(fw_priv);
-}
-
-static void fw_load_abort(struct fw_sysfs *fw_sysfs)
-{
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
- __fw_load_abort(fw_priv);
-}
-
static LIST_HEAD(pending_fw_head);
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
@@ -120,376 +60,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_unlock(&fw_lock);
}
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
-}
-
-/**
- * timeout_store() - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- * Sets the number of seconds to wait for the firmware. Once
- * this expires an error will be returned to the driver and no
- * firmware will be provided.
- *
- * Note: zero means 'wait forever'.
- **/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count)
-{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
-
- if (tmp_loading_timeout < 0)
- tmp_loading_timeout = 0;
-
- __fw_fallback_set_timeout(tmp_loading_timeout);
-
- return count;
-}
-static CLASS_ATTR_RW(timeout);
-
-static struct attribute *firmware_class_attrs[] = {
- &class_attr_timeout.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(firmware_class);
-
-static void fw_dev_release(struct device *dev)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-
- kfree(fw_sysfs);
-}
-
-static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
-{
- if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
- return -ENOMEM;
- if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
- return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
- return -ENOMEM;
-
- return 0;
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int err = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- err = do_firmware_uevent(fw_sysfs, env);
- mutex_unlock(&fw_lock);
- return err;
-}
-
-static struct class firmware_class = {
- .name = "firmware",
- .class_groups = firmware_class_groups,
- .dev_uevent = firmware_uevent,
- .dev_release = fw_dev_release,
-};
-
-int register_sysfs_loader(void)
-{
- int ret = class_register(&firmware_class);
-
- if (ret != 0)
- return ret;
- return register_firmware_config_sysctl();
-}
-
-void unregister_sysfs_loader(void)
-{
- unregister_firmware_config_sysctl();
- class_unregister(&firmware_class);
-}
-
-static ssize_t firmware_loading_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int loading = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- loading = fw_sysfs_loading(fw_sysfs->fw_priv);
- mutex_unlock(&fw_lock);
-
- return sysfs_emit(buf, "%d\n", loading);
-}
-
-/**
- * firmware_loading_store() - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- * The relevant values are:
- *
- * 1: Start a load, discarding any previous partial load.
- * 0: Conclude the load and hand the data to the driver code.
- * -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (fw_state_is_aborted(fw_priv))
- goto out;
-
- switch (loading) {
- case 1:
- /* discarding any previous partial load */
- if (!fw_sysfs_done(fw_priv)) {
- fw_free_paged_buf(fw_priv);
- fw_state_start(fw_priv);
- }
- break;
- case 0:
- if (fw_sysfs_loading(fw_priv)) {
- int rc;
-
- /*
- * Several loading requests may be pending on
- * one same firmware buf, so let all requests
- * see the mapped 'buf->data' once the loading
- * is completed.
- * */
- rc = fw_map_paged_buf(fw_priv);
- if (rc)
- dev_err(dev, "%s: map pages failed\n",
- __func__);
- else
- rc = security_kernel_post_load_data(fw_priv->data,
- fw_priv->size,
- LOADING_FIRMWARE, "blob");
-
- /*
- * Same logic as fw_load_abort, only the DONE bit
- * is ignored and we set ABORT only on failure.
- */
- if (rc) {
- fw_state_aborted(fw_priv);
- written = rc;
- } else {
- fw_state_done(fw_priv);
- }
- break;
- }
- fallthrough;
- default:
- dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- fallthrough;
- case -1:
- fw_load_abort(fw_sysfs);
- break;
- }
-out:
- mutex_unlock(&fw_lock);
- return written;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- if (read)
- memcpy(buffer, fw_priv->data + offset, count);
- else
- memcpy(fw_priv->data + offset, buffer, count);
-}
-
-static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- if (read)
- memcpy(buffer, page_data + page_ofs, page_cnt);
- else
- memcpy(page_data + page_ofs, buffer, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-}
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t ret_count;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- ret_count = -ENODEV;
- goto out;
- }
- if (offset > fw_priv->size) {
- ret_count = 0;
- goto out;
- }
- if (count > fw_priv->size - offset)
- count = fw_priv->size - offset;
-
- ret_count = count;
-
- if (fw_priv->data)
- firmware_rw_data(fw_priv, buffer, offset, count, true);
- else
- firmware_rw(fw_priv, buffer, offset, count, true);
-
-out:
- mutex_unlock(&fw_lock);
- return ret_count;
-}
-
-static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
-{
- int err;
-
- err = fw_grow_paged_buf(fw_sysfs->fw_priv,
- PAGE_ALIGN(min_size) >> PAGE_SHIFT);
- if (err)
- fw_load_abort(fw_sysfs);
- return err;
-}
-
-/**
- * firmware_data_write() - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- * Data written to the 'data' attribute will be later handed to
- * the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t retval;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- retval = -ENODEV;
- goto out;
- }
-
- if (fw_priv->data) {
- if (offset + count > fw_priv->allocated_size) {
- retval = -ENOMEM;
- goto out;
- }
- firmware_rw_data(fw_priv, buffer, offset, count, false);
- retval = count;
- } else {
- retval = fw_realloc_pages(fw_sysfs, offset + count);
- if (retval)
- goto out;
-
- retval = count;
- firmware_rw(fw_priv, buffer, offset, count, false);
- }
-
- fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
-out:
- mutex_unlock(&fw_lock);
- return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
- .attr = { .name = "data", .mode = 0644 },
- .size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
-};
-
-static struct attribute *fw_dev_attrs[] = {
- &dev_attr_loading.attr,
- NULL
-};
-
-static struct bin_attribute *fw_dev_bin_attrs[] = {
- &firmware_attr_data,
- NULL
-};
-
-static const struct attribute_group fw_dev_attr_group = {
- .attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
-};
-
-static const struct attribute_group *fw_dev_attr_groups[] = {
- &fw_dev_attr_group,
- NULL
-};
-
-static struct fw_sysfs *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, u32 opt_flags)
-{
- struct fw_sysfs *fw_sysfs;
- struct device *f_dev;
-
- fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
- if (!fw_sysfs) {
- fw_sysfs = ERR_PTR(-ENOMEM);
- goto exit;
- }
-
- fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
- fw_sysfs->fw = firmware;
- f_dev = &fw_sysfs->dev;
-
- device_initialize(f_dev);
- dev_set_name(f_dev, "%s", fw_name);
- f_dev->parent = device;
- f_dev->class = &firmware_class;
- f_dev->groups = fw_dev_attr_groups;
-exit:
- return fw_sysfs;
-}
-
/**
* fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
* @fw_sysfs: firmware sysfs information for the firmware to load
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 9f3055d3b4ca..144148595660 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,29 +6,7 @@
#include <linux/device.h>
#include "firmware.h"
-
-/**
- * struct firmware_fallback_config - firmware fallback configuration settings
- *
- * Helps describe and fine tune the fallback mechanism.
- *
- * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
- * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
- * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
- * functionality on a kernel where that config entry has been disabled.
- * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
- * This emulates the behaviour as if we had set the kernel
- * config CONFIG_FW_LOADER_USER_HELPER=n.
- * @old_timeout: for internal use
- * @loading_timeout: the timeout to wait for the fallback mechanism before
- * giving up, in seconds.
- */
-struct firmware_fallback_config {
- unsigned int force_sysfs_fallback;
- unsigned int ignore_sysfs_fallback;
- int old_timeout;
- int loading_timeout;
-};
+#include "sysfs.h"
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
@@ -40,19 +18,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
-int register_sysfs_loader(void);
-void unregister_sysfs_loader(void);
-#ifdef CONFIG_SYSCTL
-extern int register_firmware_config_sysctl(void);
-extern void unregister_firmware_config_sysctl(void);
-#else
-static inline int register_firmware_config_sysctl(void)
-{
- return 0;
-}
-static inline void unregister_firmware_config_sysctl(void) { }
-#endif /* CONFIG_SYSCTL */
-
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
@@ -66,15 +31,6 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }
-
-static inline int register_sysfs_loader(void)
-{
- return 0;
-}
-
-static inline void unregister_sysfs_loader(void)
-{
-}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 2889f446ad41..fe77e91c38a2 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -87,6 +87,7 @@ struct fw_priv {
};
extern struct mutex fw_lock;
+extern struct firmware_cache fw_cache;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)
@@ -149,7 +150,22 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
+static inline bool fw_state_is_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags);
int assign_fw(struct firmware *fw, struct device *device);
+void free_fw_priv(struct fw_priv *fw_priv);
+void fw_state_init(struct fw_priv *fw_priv);
#ifdef CONFIG_FW_LOADER
bool firmware_is_builtin(const struct firmware *fw);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 406a907a4cae..ac3f34e80194 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -35,6 +35,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/zstd.h>
#include <linux/xz.h>
#include <generated/utsrelease.h>
@@ -91,9 +92,9 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
* guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);
-static struct firmware_cache fw_cache;
+struct firmware_cache fw_cache;
-static void fw_state_init(struct fw_priv *fw_priv)
+void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
@@ -163,13 +164,9 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
}
/* Returns 1 for batching firmware requests with the same name */
-static int alloc_lookup_fw_priv(const char *fw_name,
- struct firmware_cache *fwc,
- struct fw_priv **fw_priv,
- void *dbuf,
- size_t size,
- size_t offset,
- u32 opt_flags)
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -224,7 +221,7 @@ static void __free_fw_priv(struct kref *ref)
kfree(fw_priv);
}
-static void free_fw_priv(struct fw_priv *fw_priv)
+void free_fw_priv(struct fw_priv *fw_priv)
{
struct firmware_cache *fwc = fw_priv->fwc;
spin_lock(&fwc->lock);
@@ -253,6 +250,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
+ fw_priv->data = NULL;
+ fw_priv->size = 0;
}
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
@@ -305,9 +304,73 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
#endif
/*
+ * ZSTD-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ size_t len, out_size, workspace_size;
+ void *workspace, *out_buf;
+ zstd_dctx *ctx;
+ int err;
+
+ if (fw_priv->allocated_size) {
+ out_size = fw_priv->allocated_size;
+ out_buf = fw_priv->data;
+ } else {
+ zstd_frame_header params;
+
+ if (zstd_get_frame_header(&params, in_buffer, in_size) ||
+ params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ dev_dbg(dev, "%s: invalid zstd header\n", __func__);
+ return -EINVAL;
+ }
+ out_size = params.frameContentSize;
+ out_buf = vzalloc(out_size);
+ if (!out_buf)
+ return -ENOMEM;
+ }
+
+ workspace_size = zstd_dctx_workspace_bound();
+ workspace = kvzalloc(workspace_size, GFP_KERNEL);
+ if (!workspace) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ctx = zstd_init_dctx(workspace, workspace_size);
+ if (!ctx) {
+ dev_dbg(dev, "%s: failed to initialize context\n", __func__);
+ err = -EINVAL;
+ goto error;
+ }
+
+ len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
+ if (zstd_is_error(len)) {
+ dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
+ zstd_get_error_code(len));
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fw_priv->allocated_size)
+ fw_priv->data = out_buf;
+ fw_priv->size = len;
+ err = 0;
+
+ error:
+ kvfree(workspace);
+ if (err && !fw_priv->allocated_size)
+ vfree(out_buf);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
+
+/*
* XZ-compressed firmware support
*/
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
@@ -401,7 +464,7 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
else
return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
}
-#endif /* CONFIG_FW_LOADER_COMPRESS */
+#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
/* direct firmware loading support */
static char fw_path_para[256];
@@ -771,7 +834,12 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!(opt_flags & FW_OPT_PARTIAL))
nondirect = true;
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
+#endif
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
fw_decompress_xz);
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
new file mode 100644
index 000000000000..5b0b85b70b6f
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "sysfs.h"
+
+/*
+ * sysfs support for firmware loader
+ */
+
+void __fw_load_abort(struct fw_priv *fw_priv)
+{
+ /*
+ * There is a small window in which user can write to 'loading'
+ * between loading done/aborted and disappearance of 'loading'
+ */
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ return;
+
+ fw_state_aborted(fw_priv);
+}
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ * Sets the number of seconds to wait for the firmware. Once
+ * this expires an error will be returned to the driver and no
+ * firmware will be provided.
+ *
+ * Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+ if (tmp_loading_timeout < 0)
+ tmp_loading_timeout = 0;
+
+ __fw_fallback_set_timeout(tmp_loading_timeout);
+
+ return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+ return -ENOMEM;
+ if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+ return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+static void fw_dev_release(struct device *dev)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+ if (fw_sysfs->fw_upload_priv) {
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_sysfs->fw_upload_priv);
+ }
+ kfree(fw_sysfs);
+}
+
+static struct class firmware_class = {
+ .name = "firmware",
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ .class_groups = firmware_class_groups,
+ .dev_uevent = firmware_uevent,
+#endif
+ .dev_release = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
+}
+
+void unregister_sysfs_loader(void)
+{
+ unregister_firmware_config_sysctl();
+ class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ loading = fw_state_is_loading(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+
+ return sysfs_emit(buf, "%d\n", loading);
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ * The relevant values are:
+ *
+ * 1: Start a load, discarding any previous partial load.
+ * 0: Conclude the load and hand the data to the driver code.
+ * -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t written = count;
+ int loading = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ goto out;
+
+ switch (loading) {
+ case 1:
+ /* discarding any previous partial load */
+ fw_free_paged_buf(fw_priv);
+ fw_state_start(fw_priv);
+ break;
+ case 0:
+ if (fw_state_is_loading(fw_priv)) {
+ int rc;
+
+ /*
+ * Several loading requests may be pending on
+			 * the same firmware buf, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ rc = fw_map_paged_buf(fw_priv);
+ if (rc)
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ else
+ rc = security_kernel_post_load_data(fw_priv->data,
+ fw_priv->size,
+ LOADING_FIRMWARE,
+ "blob");
+
+ /*
+ * Same logic as fw_load_abort, only the DONE bit
+ * is ignored and we set ABORT only on failure.
+ */
+ if (rc) {
+ fw_state_aborted(fw_priv);
+ written = rc;
+ } else {
+ fw_state_done(fw_priv);
+
+ /*
+ * If this is a user-initiated firmware upload
+ * then start the upload in a worker thread now.
+ */
+ rc = fw_upload_start(fw_sysfs);
+ if (rc)
+ written = rc;
+ }
+ break;
+ }
+ fallthrough;
+ default:
+ dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+ fallthrough;
+ case -1:
+ fw_load_abort(fw_sysfs);
+ if (fw_sysfs->fw_upload_priv)
+ fw_state_init(fw_sysfs->fw_priv);
+
+ break;
+ }
+out:
+ mutex_unlock(&fw_lock);
+ return written;
+}
+
+DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
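
The two attributes above form the userspace side of the fallback protocol. Purely as an illustration (not part of this patch), a userspace helper could drive them roughly like this; the sysfs directory path is hypothetical and error handling is abbreviated:

/*
 * Illustrative userspace sketch: write "1" to 'loading', stream the image
 * into 'data', then write "0" to 'loading' to hand the blob to the driver.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int push_firmware(const char *dir, const void *blob, size_t len)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), "%s/loading", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	(void)!write(fd, "1", 1);	/* 1: start, discard any partial load */
	close(fd);

	snprintf(path, sizeof(path), "%s/data", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	(void)!write(fd, blob, len);	/* the firmware image bytes */
	close(fd);

	snprintf(path, sizeof(path), "%s/loading", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	(void)!write(fd, "0", 1);	/* 0: done, hand data to the driver */
	close(fd);
	return 0;
}

A helper would typically call push_firmware("/sys/class/firmware/<name>", image, image_len) after the FIRMWARE= uevent announces the request.
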
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, fw_priv->data + offset, count);
+ else
+ memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE - 1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(fw_priv->pages[page_nr]);
+
+ if (read)
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+ else
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(fw_priv->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+ if (offset > fw_priv->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
+
+ ret_count = count;
+
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
+ else
+ firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+ int err;
+
+ err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+ PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+ if (err)
+ fw_load_abort(fw_sysfs);
+ return err;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ * Data written to the 'data' attribute will be later handed to
+ * the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
+ if (retval)
+ goto out;
+
+ retval = count;
+ firmware_rw(fw_priv, buffer, offset, count, false);
+ }
+
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+ &dev_attr_loading.attr,
+#ifdef CONFIG_FW_UPLOAD
+ &dev_attr_cancel.attr,
+ &dev_attr_status.attr,
+ &dev_attr_error.attr,
+ &dev_attr_remaining_size.attr,
+#endif
+ NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+ &firmware_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
+#ifdef CONFIG_FW_UPLOAD
+ .is_visible = fw_upload_is_visible,
+#endif
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+ &fw_dev_attr_group,
+ NULL
+};
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ struct device *f_dev;
+
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
+
+ device_initialize(f_dev);
+ dev_set_name(f_dev, "%s", fw_name);
+ f_dev->parent = device;
+ f_dev->class = &firmware_class;
+ f_dev->groups = fw_dev_attr_groups;
+exit:
+ return fw_sysfs;
+}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
new file mode 100644
index 000000000000..5d8ff1675c79
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_SYSFS_H
+#define __FIRMWARE_SYSFS_H
+
+#include <linux/device.h>
+
+#include "firmware.h"
+
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
+extern struct firmware_fallback_config fw_fallback_config;
+extern struct device_attribute dev_attr_loading;
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
+ * This emulates the behaviour as if we had set the kernel
+ * config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * giving up, in seconds.
+ */
+struct firmware_fallback_config {
+ unsigned int force_sysfs_fallback;
+ unsigned int ignore_sysfs_fallback;
+ int old_timeout;
+ int loading_timeout;
+};
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+ return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static inline void __fw_fallback_set_timeout(int timeout)
+{
+ fw_fallback_config.loading_timeout = timeout;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_SYSFS
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL)
+int register_firmware_config_sysctl(void);
+void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */
+#else /* CONFIG_FW_LOADER_SYSFS */
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_SYSFS */
+
+struct fw_sysfs {
+ bool nowait;
+ struct device dev;
+ struct fw_priv *fw_priv;
+ struct firmware *fw;
+ void *fw_upload_priv;
+};
+
+static inline struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+ return container_of(dev, struct fw_sysfs, dev);
+}
+
+void __fw_load_abort(struct fw_priv *fw_priv);
+
+static inline void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ __fw_load_abort(fw_priv);
+}
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags);
+
+#ifdef CONFIG_FW_UPLOAD
+extern struct device_attribute dev_attr_status;
+extern struct device_attribute dev_attr_error;
+extern struct device_attribute dev_attr_cancel;
+extern struct device_attribute dev_attr_remaining_size;
+
+int fw_upload_start(struct fw_sysfs *fw_sysfs);
+umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+#else
+static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ return 0;
+}
+#endif
+
+#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
new file mode 100644
index 000000000000..87044d52322a
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "sysfs_upload.h"
+
+/*
+ * Support for user-space to initiate a firmware upload to a device.
+ */
+
+static const char * const fw_upload_prog_str[] = {
+ [FW_UPLOAD_PROG_IDLE] = "idle",
+ [FW_UPLOAD_PROG_RECEIVING] = "receiving",
+ [FW_UPLOAD_PROG_PREPARING] = "preparing",
+ [FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
+ [FW_UPLOAD_PROG_PROGRAMMING] = "programming"
+};
+
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static const char *fw_upload_progress(struct device *dev,
+ enum fw_upload_prog prog)
+{
+ const char *status = "unknown-status";
+
+ if (prog < FW_UPLOAD_PROG_MAX)
+ status = fw_upload_prog_str[prog];
+ else
+ dev_err(dev, "Invalid status during secure update: %d\n", prog);
+
+ return status;
+}
+
+static const char *fw_upload_error(struct device *dev,
+ enum fw_upload_err err_code)
+{
+ const char *error = "unknown-error";
+
+ if (err_code < FW_UPLOAD_ERR_MAX)
+ error = fw_upload_err_str[err_code];
+ else
+ dev_err(dev, "Invalid error code during secure update: %d\n",
+ err_code);
+
+ return error;
+}
+
+static ssize_t
+status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
+}
+DEVICE_ATTR_RO(status);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret;
+
+ mutex_lock(&fwlp->lock);
+
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
+ ret = -EBUSY;
+ else if (!fwlp->err_code)
+ ret = 0;
+ else
+ ret = sysfs_emit(buf, "%s:%s\n",
+ fw_upload_progress(dev, fwlp->err_progress),
+ fw_upload_error(dev, fwlp->err_code));
+
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_RO(error);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret = count;
+ bool cancel;
+
+ if (kstrtobool(buf, &cancel) || !cancel)
+ return -EINVAL;
+
+ mutex_lock(&fwlp->lock);
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
+ ret = -ENODEV;
+
+ fwlp->ops->cancel(fwlp->fw_upload);
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_WO(cancel);
+
+static ssize_t remaining_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
+}
+DEVICE_ATTR_RO(remaining_size);
+
+umode_t
+fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
+
+ if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
+ return attr->mode;
+
+ return 0;
+}
+
+static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
+ enum fw_upload_prog new_progress)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = new_progress;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_set_error(struct fw_upload_priv *fwlp,
+ enum fw_upload_err err_code)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->err_progress = fwlp->progress;
+ fwlp->err_code = err_code;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = FW_UPLOAD_PROG_IDLE;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_main(struct work_struct *work)
+{
+ struct fw_upload_priv *fwlp;
+ struct fw_sysfs *fw_sysfs;
+ u32 written = 0, offset = 0;
+ enum fw_upload_err ret;
+ struct device *fw_dev;
+ struct fw_upload *fwl;
+
+ fwlp = container_of(work, struct fw_upload_priv, work);
+ fwl = fwlp->fw_upload;
+ fw_sysfs = (struct fw_sysfs *)fwl->priv;
+ fw_dev = &fw_sysfs->dev;
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
+ ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
+ if (ret != FW_UPLOAD_ERR_NONE) {
+ fw_upload_set_error(fwlp, ret);
+ goto putdev_exit;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
+ while (fwlp->remaining_size) {
+ ret = fwlp->ops->write(fwl, fwlp->data, offset,
+ fwlp->remaining_size, &written);
+ if (ret != FW_UPLOAD_ERR_NONE || !written) {
+ if (ret == FW_UPLOAD_ERR_NONE) {
+ dev_warn(fw_dev, "write-op wrote zero data\n");
+ ret = FW_UPLOAD_ERR_RW_ERROR;
+ }
+ fw_upload_set_error(fwlp, ret);
+ goto done;
+ }
+
+ fwlp->remaining_size -= written;
+ offset += written;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
+ ret = fwlp->ops->poll_complete(fwl);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ fw_upload_set_error(fwlp, ret);
+
+done:
+ if (fwlp->ops->cleanup)
+ fwlp->ops->cleanup(fwl);
+
+putdev_exit:
+ put_device(fw_dev->parent);
+
+ /*
+ * Note: fwlp->remaining_size is left unmodified here to provide
+ * additional information on errors. It will be reinitialized when
+	 * the next firmware upload begins.
+ */
+ mutex_lock(&fw_lock);
+ fw_free_paged_buf(fw_sysfs->fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+ fwlp->data = NULL;
+ fw_upload_prog_complete(fwlp);
+}
+
+/*
+ * Start a worker thread to upload data to the parent driver.
+ * Must be called with fw_lock held.
+ */
+int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+ struct device *fw_dev = &fw_sysfs->dev;
+ struct fw_upload_priv *fwlp;
+
+ if (!fw_sysfs->fw_upload_priv)
+ return 0;
+
+ if (!fw_priv->size) {
+ fw_free_paged_buf(fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ return 0;
+ }
+
+ fwlp = fw_sysfs->fw_upload_priv;
+ mutex_lock(&fwlp->lock);
+
+ /* Do not interfere with an on-going fw_upload */
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -EBUSY;
+ }
+
+ get_device(fw_dev->parent); /* released in fw_upload_main */
+
+ fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
+ fwlp->err_code = 0;
+ fwlp->remaining_size = fw_priv->size;
+ fwlp->data = fw_priv->data;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name,
+ fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+
+ queue_work(system_long_wq, &fwlp->work);
+ mutex_unlock(&fwlp->lock);
+
+ return 0;
+}
+
+/**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+ * @parent: parent device instantiating firmware upload
+ * @name: firmware name to be associated with this device
+ * @ops: pointer to structure of firmware upload ops
+ * @dd_handle: pointer to parent driver private data
+ *
+ * @name must be unique among all users of firmware upload. The firmware
+ * sysfs files for this device will be found at /sys/class/firmware/@name.
+ *
+ * Return: struct fw_upload pointer or ERR_PTR()
+ *
+ **/
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ u32 opt_flags = FW_OPT_NOCACHE;
+ struct fw_upload *fw_upload;
+ struct fw_upload_priv *fw_upload_priv;
+ struct fw_sysfs *fw_sysfs;
+ struct fw_priv *fw_priv;
+ struct device *fw_dev;
+ int ret;
+
+ if (!name || name[0] == '\0')
+ return ERR_PTR(-EINVAL);
+
+ if (!ops || !ops->cancel || !ops->prepare ||
+ !ops->write || !ops->poll_complete) {
+ dev_err(parent, "Attempt to register without all required ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!try_module_get(module))
+ return ERR_PTR(-EFAULT);
+
+ fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
+ if (!fw_upload) {
+ ret = -ENOMEM;
+ goto exit_module_put;
+ }
+
+ fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
+ if (!fw_upload_priv) {
+ ret = -ENOMEM;
+ goto free_fw_upload;
+ }
+
+ fw_upload_priv->fw_upload = fw_upload;
+ fw_upload_priv->ops = ops;
+ mutex_init(&fw_upload_priv->lock);
+ fw_upload_priv->module = module;
+ fw_upload_priv->name = name;
+ fw_upload_priv->err_code = 0;
+ fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
+ INIT_WORK(&fw_upload_priv->work, fw_upload_main);
+ fw_upload->dd_handle = dd_handle;
+
+ fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto free_fw_upload_priv;
+ }
+ fw_upload->priv = fw_sysfs;
+ fw_sysfs->fw_upload_priv = fw_upload_priv;
+ fw_dev = &fw_sysfs->dev;
+
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
+ FW_OPT_NOCACHE);
+ if (ret != 0) {
+ if (ret > 0)
+ ret = -EINVAL;
+ goto free_fw_sysfs;
+ }
+ fw_priv->is_paged_buf = true;
+ fw_sysfs->fw_priv = fw_priv;
+
+ ret = device_add(fw_dev);
+ if (ret) {
+ dev_err(fw_dev, "%s: device_register failed\n", __func__);
+ put_device(fw_dev);
+ goto exit_module_put;
+ }
+
+ return fw_upload;
+
+free_fw_sysfs:
+ kfree(fw_sysfs);
+
+free_fw_upload_priv:
+ kfree(fw_upload_priv);
+
+free_fw_upload:
+ kfree(fw_upload);
+
+exit_module_put:
+ module_put(module);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_register);
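
For context only (not part of this patch), a low-level driver would plug into this interface roughly as sketched below. The my_flash_*() helpers and size constants are invented, and the ops signatures simply mirror the calls made from fw_upload_main() above:

/* Hypothetical consumer of the firmware upload API; a sketch, not a driver. */
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/sizes.h>

#define MY_MAX_IMAGE_SIZE	SZ_1M	/* invented device limit */
#define MY_BLOCK_SIZE		SZ_4K	/* invented flash block size */

static enum fw_upload_err my_fw_prepare(struct fw_upload *fwl,
					const u8 *data, u32 size)
{
	if (!size || size > MY_MAX_IMAGE_SIZE)
		return FW_UPLOAD_ERR_INVALID_SIZE;
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_fw_write(struct fw_upload *fwl, const u8 *data,
				      u32 offset, u32 size, u32 *written)
{
	/* push at most one block per call; fw_upload_main() loops for us */
	*written = my_flash_block(fwl->dd_handle, data + offset,
				  min_t(u32, size, MY_BLOCK_SIZE));
	return *written ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_RW_ERROR;
}

static enum fw_upload_err my_fw_poll_complete(struct fw_upload *fwl)
{
	/* my_flash_wait_idle() is assumed to return true once programming ends */
	return my_flash_wait_idle(fwl->dd_handle) ? FW_UPLOAD_ERR_NONE :
						    FW_UPLOAD_ERR_TIMEOUT;
}

static void my_fw_cancel(struct fw_upload *fwl)
{
	my_flash_abort(fwl->dd_handle);
}

static const struct fw_upload_ops my_fw_ops = {
	.prepare	= my_fw_prepare,
	.write		= my_fw_write,
	.poll_complete	= my_fw_poll_complete,
	.cancel		= my_fw_cancel,
};

static int my_probe(struct device *dev, void *priv)
{
	struct fw_upload *fwl;

	/* creates /sys/class/firmware/my-fw/{loading,data,status,...} */
	fwl = firmware_upload_register(THIS_MODULE, dev, "my-fw",
				       &my_fw_ops, priv);
	return PTR_ERR_OR_ZERO(fwl);
}
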
+
+/**
+ * firmware_upload_unregister() - Unregister firmware upload interface
+ * @fw_upload: pointer to struct fw_upload
+ **/
+void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+ struct fw_sysfs *fw_sysfs = fw_upload->priv;
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ mutex_lock(&fw_upload_priv->lock);
+ if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fw_upload_priv->lock);
+ goto unregister;
+ }
+
+ fw_upload_priv->ops->cancel(fw_upload);
+ mutex_unlock(&fw_upload_priv->lock);
+
+ /* Ensure lower-level device-driver is finished */
+ flush_work(&fw_upload_priv->work);
+
+unregister:
+ device_unregister(&fw_sysfs->dev);
+ module_put(fw_upload_priv->module);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_unregister);
diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h
new file mode 100644
index 000000000000..31931ff7808a
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSFS_UPLOAD_H
+#define __SYSFS_UPLOAD_H
+
+#include <linux/device.h>
+
+#include "sysfs.h"
+
+/**
+ * enum fw_upload_prog - firmware upload progress codes
+ * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress
+ * @FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data
+ * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload
+ * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device
+ * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update
+ * @FW_UPLOAD_PROG_MAX: Maximum progress code marker
+ */
+enum fw_upload_prog {
+ FW_UPLOAD_PROG_IDLE,
+ FW_UPLOAD_PROG_RECEIVING,
+ FW_UPLOAD_PROG_PREPARING,
+ FW_UPLOAD_PROG_TRANSFERRING,
+ FW_UPLOAD_PROG_PROGRAMMING,
+ FW_UPLOAD_PROG_MAX
+};
+
+struct fw_upload_priv {
+ struct fw_upload *fw_upload;
+ struct module *module;
+ const char *name;
+ const struct fw_upload_ops *ops;
+ struct mutex lock; /* protect data structure contents */
+ struct work_struct work;
+ const u8 *data; /* pointer to update data */
+ u32 remaining_size; /* size remaining to transfer */
+ enum fw_upload_prog progress;
+ enum fw_upload_prog err_progress; /* progress at time of failure */
+ enum fw_upload_err err_code; /* security manager error code */
+};
+
+#endif /* __SYSFS_UPLOAD_H */
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
new file mode 100644
index 000000000000..87af641cfe1a
--- /dev/null
+++ b/drivers/base/physical_location.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sysfs.h>
+
+#include "physical_location.h"
+
+bool dev_add_physical_location(struct device *dev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return false;
+
+ status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ dev->physical_location =
+ kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+ if (!dev->physical_location)
+ return false;
+ dev->physical_location->panel = pld->panel;
+ dev->physical_location->vertical_position = pld->vertical_position;
+ dev->physical_location->horizontal_position = pld->horizontal_position;
+ dev->physical_location->dock = pld->dock;
+ dev->physical_location->lid = pld->lid;
+
+ ACPI_FREE(pld);
+ return true;
+}
+
+static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *panel;
+
+ switch (dev->physical_location->panel) {
+ case DEVICE_PANEL_TOP:
+ panel = "top";
+ break;
+ case DEVICE_PANEL_BOTTOM:
+ panel = "bottom";
+ break;
+ case DEVICE_PANEL_LEFT:
+ panel = "left";
+ break;
+ case DEVICE_PANEL_RIGHT:
+ panel = "right";
+ break;
+ case DEVICE_PANEL_FRONT:
+ panel = "front";
+ break;
+ case DEVICE_PANEL_BACK:
+ panel = "back";
+ break;
+ default:
+ panel = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", panel);
+}
+static DEVICE_ATTR_RO(panel);
+
+static ssize_t vertical_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *vertical_position;
+
+ switch (dev->physical_location->vertical_position) {
+ case DEVICE_VERT_POS_UPPER:
+ vertical_position = "upper";
+ break;
+ case DEVICE_VERT_POS_CENTER:
+ vertical_position = "center";
+ break;
+ case DEVICE_VERT_POS_LOWER:
+ vertical_position = "lower";
+ break;
+ default:
+ vertical_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", vertical_position);
+}
+static DEVICE_ATTR_RO(vertical_position);
+
+static ssize_t horizontal_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *horizontal_position;
+
+ switch (dev->physical_location->horizontal_position) {
+ case DEVICE_HORI_POS_LEFT:
+ horizontal_position = "left";
+ break;
+ case DEVICE_HORI_POS_CENTER:
+ horizontal_position = "center";
+ break;
+ case DEVICE_HORI_POS_RIGHT:
+ horizontal_position = "right";
+ break;
+ default:
+ horizontal_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", horizontal_position);
+}
+static DEVICE_ATTR_RO(horizontal_position);
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->dock ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(dock);
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->lid ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(lid);
+
+static struct attribute *dev_attr_physical_location[] = {
+ &dev_attr_panel.attr,
+ &dev_attr_vertical_position.attr,
+ &dev_attr_horizontal_position.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_lid.attr,
+ NULL,
+};
+
+const struct attribute_group dev_attr_physical_location_group = {
+ .name = "physical_location",
+ .attrs = dev_attr_physical_location,
+};
+
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
new file mode 100644
index 000000000000..82cde9f1b161
--- /dev/null
+++ b/drivers/base/physical_location.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/device.h>
+
+#ifdef CONFIG_ACPI
+extern bool dev_add_physical_location(struct device *dev);
+extern const struct attribute_group dev_attr_physical_location_group;
+#else
+static inline bool dev_add_physical_location(struct device *dev) { return false; }
+static const struct attribute_group dev_attr_physical_location_group = {};
+#endif
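
As a sketch only (the actual core.c wiring is not part of this hunk), a caller could expose the new attribute group once the ACPI _PLD data has been read:

#include <linux/device.h>

#include "physical_location.h"

static int my_expose_location(struct device *dev)
{
	if (!dev_add_physical_location(dev))
		return 0;	/* no ACPI _PLD data, nothing to expose */

	return device_add_group(dev, &dev_attr_physical_location_group);
}
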
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 70bc30cf575c..51bb2289865c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -233,7 +233,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
out_not_found:
ret = -ENXIO;
out:
- WARN(ret == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!ret, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -448,7 +449,8 @@ static int __platform_get_irq_byname(struct platform_device *dev,
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
- WARN(r->start == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!r->start, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return r->start;
}
@@ -1277,31 +1279,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 3adcac2c78fa..ed6f449f8e5c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1206,15 +1206,23 @@ const void *device_get_match_data(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_match_data);
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
struct fwnode_handle *ep;
+ unsigned int count = 0;
void *ret;
fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (matches && count >= matches_len) {
+ fwnode_handle_put(ep);
+ break;
+ }
+
node = fwnode_graph_get_remote_port_parent(ep);
if (!fwnode_device_is_available(node)) {
fwnode_handle_put(node);
@@ -1224,33 +1232,43 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
ret = match(node, con_id, data);
fwnode_handle_put(node);
if (ret) {
- fwnode_handle_put(ep);
- return ret;
+ if (matches)
+ matches[count] = ret;
+ count++;
}
}
- return NULL;
+ return count;
}
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
+ unsigned int count = 0;
+ unsigned int i;
void *ret;
- int i;
for (i = 0; ; i++) {
+ if (matches && count >= matches_len)
+ break;
+
node = fwnode_find_reference(fwnode, con_id, i);
if (IS_ERR(node))
break;
ret = match(node, NULL, data);
fwnode_handle_put(node);
- if (ret)
- return ret;
+ if (ret) {
+ if (matches)
+ matches[count] = ret;
+ count++;
+ }
}
- return NULL;
+ return count;
}
/**
@@ -1268,15 +1286,61 @@ void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match)
{
+ unsigned int count;
void *ret;
if (!fwnode || !match)
return NULL;
- ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
- if (ret)
+ count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ if (count)
return ret;
- return fwnode_devcon_match(fwnode, con_id, data, match);
+ count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ return count ? ret : NULL;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ * If @matches is NULL, @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len)
+{
+ unsigned int count_graph;
+ unsigned int count_ref;
+
+ if (!fwnode || !match)
+ return -EINVAL;
+
+ count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ if (matches) {
+ matches += count_graph;
+ matches_len -= count_graph;
+ }
+
+ count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
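
A rough caller of the new helper might look like this (illustration only; the connection id and the match callback are invented, and each fwnode reference handed back through @matches still needs a later fwnode_handle_put()):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>

/* toy match callback: accept every available remote node */
static void *my_match(struct fwnode_handle *fwnode, const char *id, void *data)
{
	return fwnode_handle_get(fwnode);
}

static int my_collect_connections(struct device *dev)
{
	void *matches[4];
	int count;

	count = fwnode_connection_find_matches(dev_fwnode(dev), "my-con-id",
					       NULL, my_match,
					       matches, ARRAY_SIZE(matches));
	if (count < 0)
		return count;

	/* at most ARRAY_SIZE(matches) entries of matches[] are valid */
	return count;
}
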
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f1dda4ef22cc..084f9b8a0ba3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1102,7 +1102,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
if (partscan)
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+ clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1198,7 +1198,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
*/
lo->lo_flags = 0;
if (!part_shift)
- lo->lo_disk->flags |= GENHD_FL_NO_PART;
+ set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
@@ -1308,7 +1308,7 @@ out_unfreeze:
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+ clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
partscan = true;
}
out_unlock:
@@ -2011,7 +2011,7 @@ static int loop_add(int i)
* userspace tools. Parameters like this in general should be avoided.
*/
if (!part_shift)
- disk->flags |= GENHD_FL_NO_PART;
+ set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac8b045c777c..07f3c139a3d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -403,13 +403,14 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
- if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
@@ -478,6 +479,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
@@ -745,7 +747,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
- if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
tag, cmd->status, cmd->flags);
ret = -ENOENT;
@@ -854,8 +856,16 @@ static void recv_work(struct work_struct *work)
}
rq = blk_mq_rq_from_pdu(cmd);
- if (likely(!blk_should_fake_timeout(rq->q)))
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q))) {
+ bool complete;
+
+ mutex_lock(&cmd->lock);
+ complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
+ &cmd->flags);
+ mutex_unlock(&cmd->lock);
+ if (complete)
+ blk_mq_complete_request(rq);
+ }
percpu_ref_put(&q->q_usage_counter);
}
@@ -1419,7 +1429,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
- sock_shutdown(nbd);
+ nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1518,15 +1528,20 @@ static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
- if (!config)
- return NULL;
+ if (!config) {
+ module_put(THIS_MODULE);
+ return ERR_PTR(-ENOMEM);
+ }
+
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
- try_module_get(THIS_MODULE);
return config;
}
@@ -1553,12 +1568,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd->config = nbd_alloc_config();
- if (!config) {
- ret = -ENOMEM;
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
+ ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
@@ -1798,17 +1814,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
refcount_set(&nbd->refs, 0);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
-
- /* Too big first_minor can cause duplicate creation of
- * sysfs files/links, since index << part_shift might overflow, or
- * MKDEV() expect that the max bits of first_minor is 20.
- */
disk->first_minor = index << part_shift;
- if (disk->first_minor < index || disk->first_minor > MINORMASK) {
- err = -EINVAL;
- goto out_free_work;
- }
-
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
@@ -1913,14 +1919,25 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- if (info->attrs[NBD_ATTR_INDEX])
+ if (info->attrs[NBD_ATTR_INDEX]) {
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+ /*
+		/*
+		 * A too-large first_minor can cause duplicate creation of
+		 * sysfs files/links, since index << part_shift might overflow, or
+		 * because MKDEV() expects that the max bits of first_minor is 20.
+ */
+ if (index < 0 || index > MINORMASK >> part_shift) {
+ pr_err("illegal input index %d\n", index);
+ return -EINVAL;
+ }
+ }
if (!info->attrs[NBD_ATTR_SOCKETS]) {
- printk(KERN_ERR "nbd: must specify at least one socket\n");
+ pr_err("must specify at least one socket\n");
return -EINVAL;
}
if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
- printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
+ pr_err("must specify a size in bytes for the device\n");
return -EINVAL;
}
again:
@@ -1956,7 +1973,7 @@ again:
nbd_put(nbd);
if (index == -1)
goto again;
- printk(KERN_ERR "nbd: nbd%d already in use\n", index);
+ pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
if (WARN_ON(nbd->config)) {
@@ -1964,13 +1981,14 @@ again:
nbd_put(nbd);
return -EINVAL;
}
- config = nbd->config = nbd_alloc_config();
- if (!nbd->config) {
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
- printk(KERN_ERR "nbd: couldn't allocate config\n");
- return -ENOMEM;
+ pr_err("couldn't allocate config\n");
+ return PTR_ERR(config);
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
@@ -2023,7 +2041,7 @@ again:
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
- printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+ pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
@@ -2032,7 +2050,7 @@ again:
nbd_sock_policy,
info->extack);
if (ret != 0) {
- printk(KERN_ERR "nbd: error processing sock list\n");
+ pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
@@ -2104,7 +2122,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
if (!info->attrs[NBD_ATTR_INDEX]) {
- printk(KERN_ERR "nbd: must specify an index to disconnect\n");
+ pr_err("must specify an index to disconnect\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2112,14 +2130,12 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: couldn't find device at index %d\n",
- index);
+ pr_err("couldn't find device at index %d\n", index);
return -EINVAL;
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: device at index %d is going down\n",
- index);
+ pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
@@ -2144,7 +2160,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
if (!info->attrs[NBD_ATTR_INDEX]) {
- printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
+ pr_err("must specify a device to reconfigure\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2152,8 +2168,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
- index);
+ pr_err("couldn't find a device at index %d\n", index);
return -EINVAL;
}
if (nbd->backend) {
@@ -2174,8 +2189,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: device at index %d is going down\n",
- index);
+ pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
@@ -2239,7 +2253,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
- printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+ pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
@@ -2248,7 +2262,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
nbd_sock_policy,
info->extack);
if (ret != 0) {
- printk(KERN_ERR "nbd: error processing sock list\n");
+ pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
@@ -2465,7 +2479,7 @@ static int __init nbd_init(void)
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
if (max_part < 0) {
- printk(KERN_ERR "nbd: max_part must be >= 0\n");
+ pr_err("max_part must be >= 0\n");
return -EINVAL;
}
@@ -2528,6 +2542,12 @@ static void __exit nbd_cleanup(void)
struct nbd_device *nbd;
LIST_HEAD(del_list);
+ /*
+ * Unregister netlink interface prior to waiting
+ * for the completion of netlink commands.
+ */
+ genl_unregister_family(&nbd_genl_family);
+
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
@@ -2537,8 +2557,11 @@ static void __exit nbd_cleanup(void)
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
+ if (refcount_read(&nbd->config_refs))
+ pr_err("possibly leaking nbd_config (ref %d)\n",
+ refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
- printk(KERN_ERR "nbd: possibly leaking a device\n");
+ pr_err("possibly leaking a device\n");
nbd_put(nbd);
}
@@ -2546,7 +2569,6 @@ static void __exit nbd_cleanup(void)
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
- genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 539cfeac263d..6b67088f4ea7 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -77,12 +77,6 @@ enum {
NULL_IRQ_TIMER = 2,
};
-enum {
- NULL_Q_BIO = 0,
- NULL_Q_RQ = 1,
- NULL_Q_MQ = 2,
-};
-
static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 4525a65e1b23..8359b43842f2 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -60,6 +60,13 @@ struct nullb_zone {
unsigned int capacity;
};
+/* Queue modes */
+enum {
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
struct nullb_device {
struct nullb *nullb;
struct config_item item;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index ed158ea4fdd1..2fdd7b20c224 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -398,10 +398,10 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
*/
if (append) {
sector = zone->wp;
- if (cmd->bio)
- cmd->bio->bi_iter.bi_sector = sector;
- else
+ if (dev->queue_mode == NULL_Q_MQ)
cmd->rq->__sector = sector;
+ else
+ cmd->bio->bi_iter.bi_sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b9..63b4f6431d2e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true, NULL);
+ blk_execute_rq_nowait(rq, true);
return 0;
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true, NULL);
+ blk_execute_rq_nowait(rq, true);
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d624cc8eddc3..6fc7850c2b0a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,10 @@ MODULE_PARM_DESC(num_request_queues,
"0 for no limit. "
"Values > nr_cpu_ids truncated to nr_cpu_ids.");
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
+MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -74,6 +78,7 @@ struct virtio_blk {
/* num of vqs */
int num_vqs;
+ int io_queues[HCTX_MAX_TYPES];
struct virtio_blk_vq *vqs;
};
@@ -96,8 +101,7 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
-static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
- struct scatterlist *data_sg, bool have_data)
+static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
unsigned int num_out = 0, num_in = 0;
@@ -105,11 +109,11 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
- if (have_data) {
+ if (vbr->sg_table.nents) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
- sgs[num_out++] = data_sg;
+ sgs[num_out++] = vbr->sg_table.sgl;
else
- sgs[num_out + num_in++] = data_sg;
+ sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
@@ -299,6 +303,28 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+ struct virtio_blk *vblk,
+ struct request *req,
+ struct virtblk_req *vbr)
+{
+ blk_status_t status;
+
+ status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ if (unlikely(status))
+ return status;
+
+ blk_mq_start_request(req);
+
+ vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(vbr->sg_table.nents < 0)) {
+ virtblk_cleanup_cmd(req);
+ return BLK_STS_RESOURCE;
+ }
+
+ return BLK_STS_OK;
+}
+
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -306,26 +332,17 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
- int num;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;
int err;
- status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ status = virtblk_prep_rq(hctx, vblk, req, vbr);
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
- num = virtblk_map_data(hctx, req, vbr);
- if (unlikely(num < 0)) {
- virtblk_cleanup_cmd(req);
- return BLK_STS_RESOURCE;
- }
-
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -355,6 +372,75 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+static bool virtblk_prep_rq_batch(struct request *req)
+{
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+
+ return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct request **rqlist,
+ struct request **requeue_list)
+{
+ unsigned long flags;
+ int err;
+ bool kick;
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ err = virtblk_add_req(vq->vq, vbr);
+ if (err) {
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
+ rq_list_add(requeue_list, req);
+ }
+ }
+
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ bool kick;
+
+ if (!virtblk_prep_rq_batch(req)) {
+ rq_list_move(rqlist, &requeue_list, req, prev);
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ req->rq_next = NULL;
+ kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ if (kick)
+ virtqueue_notify(vq->vq);
+
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -512,6 +598,7 @@ static int init_vq(struct virtio_blk *vblk)
const char **names;
struct virtqueue **vqs;
unsigned short num_vqs;
+ unsigned int num_poll_vqs;
struct virtio_device *vdev = vblk->vdev;
struct irq_affinity desc = { 0, };
@@ -520,6 +607,7 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
+
if (!err && !num_vqs) {
dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
return -EINVAL;
@@ -529,6 +617,17 @@ static int init_vq(struct virtio_blk *vblk)
min_not_zero(num_request_queues, nr_cpu_ids),
num_vqs);
+ num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
+
+ vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
+ vblk->io_queues[HCTX_TYPE_READ] = 0;
+ vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vblk->io_queues[HCTX_TYPE_DEFAULT],
+ vblk->io_queues[HCTX_TYPE_READ],
+ vblk->io_queues[HCTX_TYPE_POLL]);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
@@ -541,12 +640,18 @@ static int init_vq(struct virtio_blk *vblk)
goto out;
}
- for (i = 0; i < num_vqs; i++) {
+ for (i = 0; i < num_vqs - num_poll_vqs; i++) {
callbacks[i] = virtblk_done;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
names[i] = vblk->vqs[i].name;
}
+ for (; i < num_vqs; i++) {
+ callbacks[i] = NULL;
+ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
+ names[i] = vblk->vqs[i].name;
+ }
+
/* Discover virtqueues and write information to configuration. */
err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
if (err)
@@ -692,16 +797,90 @@ static const struct attribute_group *virtblk_attr_groups[] = {
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ map->nr_queues = vblk->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(&set->map[i]);
+ else
+ blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ }
+
+ return 0;
+}
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+ virtblk_cleanup_cmd(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtblk_req *vbr;
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
- return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
- vblk->vdev, 0);
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ found++;
+ if (!blk_mq_add_to_batch(req, iob, vbr->status,
+ virtblk_complete_batch))
+ blk_mq_complete_request(req);
+ }
+
+ if (found)
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return found;
+}
+
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct virtio_blk *vblk = data;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+ WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
+ hctx->driver_data = vq;
+ return 0;
}
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
+ .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
+ .poll = virtblk_poll,
};
static unsigned int virtblk_queue_depth;
@@ -778,6 +957,9 @@ static int virtblk_probe(struct virtio_device *vdev)
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
err = blk_mq_alloc_tag_set(&vblk->tag_set);
if (err)
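For context: with the new poll_queues module parameter, init_vq() above reserves a tail of the virtqueues as IRQ-less poll queues (reaped via virtblk_poll()), keeps at least one interrupt-driven queue in the default map, and leaves the read map empty. A minimal sketch of that split, using a hypothetical helper name and not part of the patch:

	/* Illustrative only: how the default/read/poll split above is derived. */
	static void example_split_queues(unsigned int num_vqs, unsigned int poll_queues,
					 unsigned int io_queues[HCTX_MAX_TYPES])
	{
		unsigned int num_poll_vqs = min(poll_queues, num_vqs - 1);

		io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;	/* always >= 1 */
		io_queues[HCTX_TYPE_READ] = 0;
		io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
	}

Polled completions are only exercised for requests submitted on the polling path (for example io_uring with IORING_SETUP_IOPOLL); with poll_queues left at 0 the driver behaves as before.
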
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 55e004d03ced..a88ce4426400 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1221,7 +1221,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != INVALID_GRANT_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
- 0UL);
+ NULL);
rinfo->persistent_gnts_c--;
}
if (info->feature_persistent)
@@ -1244,7 +1244,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].req.u.rw.nr_segments;
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
- gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+ gnttab_end_foreign_access(persistent_gnt->gref, NULL);
if (info->feature_persistent)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
@@ -1259,7 +1259,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
for (j = 0; j < INDIRECT_GREFS(segs); j++) {
persistent_gnt = rinfo->shadow[i].indirect_grants[j];
- gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+ gnttab_end_foreign_access(persistent_gnt->gref, NULL);
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
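For context: these hunks follow a grant-table API change in which gnttab_end_foreign_access() takes a struct page * instead of the old unsigned long page address, so the stray 0UL arguments become NULL. A hedged sketch of the assumed interface (not part of this patch):

	/* Assumed prototype after the grant-table rework (include/xen/grant_table.h):
	 * if @page is non-NULL the grant code frees the page once the grant is no
	 * longer in use; passing NULL leaves page ownership with the caller, which
	 * is why blkif_free_ring() still calls __free_page() itself above.
	 */
	void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
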
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 76648c4fdaf4..e81a9700cfd0 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -185,31 +185,14 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- char *driver_override, *old = mc_dev->driver_override;
- char *cp;
+ int ret;
if (WARN_ON(dev->bus != &fsl_mc_bus_type))
return -EINVAL;
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- if (strlen(driver_override)) {
- mc_dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- mc_dev->driver_override = NULL;
- }
-
- kfree(old);
+ ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
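For reference: the deleted open-coded parsing (trimming the trailing newline, clearing the override on an empty string, freeing the old copy) now lives in the driver core's driver_set_override() helper, so the bus store callback only forwards the sysfs buffer. A sketch of the matching show callback such buses commonly pair with it (an assumption, not part of this hunk):

	static ssize_t driver_override_show(struct device *dev,
					    struct device_attribute *attr, char *buf)
	{
		struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
		ssize_t len;

		/* Hold the device lock so the override cannot change mid-read */
		device_lock(dev);
		len = sysfs_emit(buf, "%s\n", mc_dev->driver_override);
		device_unlock(dev);

		return len;
	}
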
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index 4748df7f9cd5..b39a11e6c624 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -6,3 +6,4 @@
#
source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 5f5708a249f5..46981331b38f 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,2 +1,5 @@
# Host MHI stack
obj-y += host/
+
+# Endpoint MHI stack
+obj-y += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index b4ef9acd3ce7..f794b9c8049e 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -165,6 +165,22 @@
#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+/* State change event */
+#define MHI_SC_EV_PTR 0
+#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
+#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* EE event */
+#define MHI_EE_EV_PTR 0
+#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
+#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
+#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
/* Transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
@@ -175,6 +191,12 @@
FIELD_PREP(BIT(9), ieot) | \
FIELD_PREP(BIT(8), ieob) | \
FIELD_PREP(BIT(0), chain))
+#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
+#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
/* RSC transfer descriptor macros */
#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr)
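The new MHI_TRE_DATA_GET_* accessors above are the read-side counterparts of the existing DWORD packing macros; the endpoint code uses them to decode transfer ring elements cached from host memory. A small illustrative sketch (hypothetical helper, not part of this patch):

	/* Decode a cached transfer ring element (TRE) with the new accessors. */
	static void example_decode_tre(struct mhi_ring_element *el)
	{
		u64 buf = MHI_TRE_DATA_GET_PTR(el);		/* host buffer address */
		u32 len = MHI_TRE_DATA_GET_LEN(el);		/* buffer length in bytes */
		bool chain = MHI_TRE_DATA_GET_CHAIN(el);	/* more TREs follow in this TD */
		bool ieot = MHI_TRE_DATA_GET_IEOT(el);		/* host expects an EOT completion */

		pr_debug("TRE: buf 0x%llx len %u chain %d ieot %d\n", buf, len, chain, ieot);
	}
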
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 000000000000..90ab3b040672
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by a host processor to control
+ and communicate with a modem device over a high-speed peripheral
+ bus or shared memory.
+
+ MHI_BUS_EP implements the MHI protocol for endpoint devices,
+ such as an SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..aad85f180b70
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..a2125fa5fe2f
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
+#define MHI_REG_OFFSET 0x100
+#define BHI_REG_OFFSET 0x200
+
+/* MHI registers */
+#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN)
+#define EP_MHIVER (MHI_REG_OFFSET + MHIVER)
+#define EP_MHICFG (MHI_REG_OFFSET + MHICFG)
+#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF)
+#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF)
+#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF)
+#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF)
+#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF)
+#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL)
+#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS)
+#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER)
+#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER)
+#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER)
+#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER)
+#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER)
+#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER)
+#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER)
+#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER)
+#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER)
+#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER)
+#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER)
+#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER)
+#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER)
+#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER)
+#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER)
+#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER)
+
+/* MHI BHI registers */
+#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC)
+#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV)
+
+/* MHI Doorbell registers */
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+
+#define MHI_CTRL_INT_STATUS 0x4
+#define MHI_CTRL_INT_STATUS_MSK BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1)
+#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+
+/*
+ * Unlike the usual "masking" convention, writing "1" to a bit in this register
+ * enables the interrupt and writing "0" disables it.
+ */
+#define MHI_CTRL_INT_MASK 0x94
+#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_CRDB_MASK BIT(1)
+
+#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+
+#define NR_OF_CMD_RINGS 1
+#define MHI_MASK_ROWS_CH_DB 4
+#define MHI_MASK_ROWS_EV_DB 4
+#define MHI_MASK_CH_LEN 32
+#define MHI_MASK_EV_LEN 32
+
+/* Generic context */
+struct mhi_generic_ctx {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+};
+
+/* Ring element */
+union mhi_ep_ring_ctx {
+ struct mhi_cmd_ctxt cmd;
+ struct mhi_event_ctxt ev;
+ struct mhi_chan_ctxt ch;
+ struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring_item {
+ struct list_head node;
+ struct mhi_ep_ring *ring;
+};
+
+struct mhi_ep_ring {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ union mhi_ep_ring_ctx *ring_ctx;
+ struct mhi_ring_element *ring_cache;
+ enum mhi_ep_ring_type type;
+ u64 rbase;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ u32 ch_id;
+ u32 er_index;
+ u32 irq_vector;
+ bool started;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+ struct list_head node;
+ enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_ring ring;
+ struct mutex lock;
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+ u32 chan;
+ bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/* MMIO related functions */
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val);
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
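The header above only declares the ring, MMIO and state-machine helpers; their definitions come with ring.c, mmio.c and sm.c later in this patch. As a rough illustration of the ring bookkeeping implied by struct mhi_ep_ring (an assumption about ring.c, not code from this hunk), the offset helpers amount to element-index arithmetic over the fixed-size TREs from common.h:

	/* Illustrative only: ring offsets are element indices relative to rbase. */
	static size_t example_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
	{
		return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
	}

	static void example_inc_index(struct mhi_ep_ring *ring)
	{
		/* Wrap around once the read offset reaches the end of the ring */
		ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
	}
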
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 000000000000..40109a79017a
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Endpoint bus stack
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define M0_WAIT_DELAY_MS 100
+#define M0_WAIT_COUNT 100
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ struct mhi_ring_element *el, bool bei)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_ctx *ctx;
+ struct mhi_ep_ring *ring;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ ring = &mhi_cntrl->mhi_event[ring_idx].ring;
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
+ if (!ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+ }
+
+ /* Add element to the event ring */
+ ret = mhi_ep_ring_add_element(ring, el);
+ if (ret) {
+ dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /*
+ * Raise an IRQ to the host only if the BEI flag is not set in the TRE. The
+ * host might set this flag for interrupt moderation as per the MHI protocol.
+ */
+ if (!bei)
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+{
+ struct mhi_ring_element event = {};
+
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_SC_EV_DWORD0(state);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct mhi_ring_element event = {};
+
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event.dword[0] = MHI_CC_EV_DWORD0(code);
+ event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_ring *ch_ring;
+ u32 tmp, ch_id;
+ int ret;
+
+ ch_id = MHI_TRE_GET_CMD_CHID(el);
+ mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+ switch (MHI_TRE_GET_CMD_TYPE(el)) {
+ case MHI_PKT_TYPE_START_CHAN_CMD:
+ dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
+
+ mutex_lock(&mhi_chan->lock);
+ /* Initialize and configure the corresponding channel ring */
+ if (!ch_ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_EV_CC_UNDEFINED_ERR);
+ if (ret)
+ dev_err(dev, "Error sending completion event: %d\n", ret);
+
+ goto err_unlock;
+ }
+ }
+
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ /*
+ * Create the MHI device only during UL channel start. Since the MHI
+ * channels operate in pairs, we'll associate both the UL and DL
+ * channels with the same device.
+ *
+ * We also need to check for mhi_dev != NULL because the host
+ * will issue the START_CHAN command during resume and we don't
+ * destroy the device during suspend.
+ */
+ if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+ ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+ }
+
+ /* Finally, enable DB for the channel */
+ mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
+
+ break;
+ case MHI_PKT_TYPE_STOP_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Disable DB for the channel */
+ mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+
+ /* Send channel disconnect status to client drivers */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to STOP */
+ mhi_chan->state = MHI_CH_STATE_STOP;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ case MHI_PKT_TYPE_RESET_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Stop and reset the transfer ring */
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+
+ /* Send channel disconnect status to client driver */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ default:
+ dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
+ MHI_TRE_GET_CMD_TYPE(el), ch_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ return !!(ring->rd_offset == ring->wr_offset);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t tr_len, read_offset, write_offset;
+ struct mhi_ring_element *el;
+ bool tr_done = false;
+ void *write_addr;
+ u64 read_addr;
+ u32 buf_left;
+ int ret;
+
+ buf_left = len;
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ return -ENODEV;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ /* Check if there is data pending to be read from previous read operation */
+ if (mhi_chan->tre_bytes_left) {
+ dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+ tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ } else {
+ mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+ tr_len = min(buf_left, mhi_chan->tre_size);
+ }
+
+ read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ write_offset = len - buf_left;
+ read_addr = mhi_chan->tre_loc + read_offset;
+ write_addr = result->buf_addr + write_offset;
+
+ dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+ return ret;
+ }
+
+ buf_left -= tr_len;
+ mhi_chan->tre_bytes_left -= tr_len;
+
+ /*
+ * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+ * read completely:
+ *
+ * 1. Send completion event to the host based on the flags set in TRE.
+ * 2. Increment the local read offset of the transfer ring.
+ */
+ if (!mhi_chan->tre_bytes_left) {
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+
+ tr_done = true;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ }
+
+ result->bytes_xferd += tr_len;
+ } while (buf_left && !tr_done);
+
+ return 0;
+}
+
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ /*
+ * Bail out if transfer callback is not registered for the channel.
+ * This is most likely because the client driver is not loaded at this point.
+ */
+ if (!mhi_chan->xfer_cb) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+ return -ENODEV;
+ }
+
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ result.buf_addr = kzalloc(len, GFP_KERNEL);
+ if (!result.buf_addr)
+ return -ENOMEM;
+
+ do {
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ kfree(result.buf_addr);
+ return ret;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ result.bytes_xferd = 0;
+ memset(result.buf_addr, 0, len);
+
+ /* Read until the ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+
+ kfree(result.buf_addr);
+ }
+
+ return 0;
+}
+
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ u32 buf_left, read_offset;
+ struct mhi_ep_ring *ring;
+ enum mhi_ev_ccs code;
+ void *read_addr;
+ u64 write_addr;
+ size_t tr_len;
+ u32 tre_len;
+ int ret;
+
+ buf_left = skb->len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+ dev_err(dev, "TRE not available!\n");
+ ret = -ENOSPC;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_TRE_DATA_GET_LEN(el);
+
+ tr_len = min(buf_left, tre_len);
+ read_offset = skb->len - buf_left;
+ read_addr = skb->data + read_offset;
+ write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
+ goto err_exit;
+ }
+
+ buf_left -= tr_len;
+ /*
+ * For all TREs queued by the host for the DL channel, only the EOT flag will be set.
+ * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+ * the host so that it can extend the packet boundary to the next TREs. Otherwise,
+ * send the EOT event to the host to indicate the packet boundary.
+ */
+ if (buf_left)
+ code = MHI_EV_CC_OVERFLOW;
+ else
+ code = MHI_EV_CC_EOT;
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (buf_left);
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host channel context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
+ &mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ch_ctx_cache,
+ ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
+ return ret;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host event context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
+ &mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ev_ctx_cache,
+ ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host command context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
+ &mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
+ cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
+ goto err_ev_ctx;
+ }
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the command ring\n");
+ goto err_cmd_ctx;
+ }
+
+ return ret;
+
+err_cmd_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+err_ev_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+err_ch_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+
+ return ret;
+}
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ /*
+ * Doorbell interrupts are enabled when the corresponding channel gets started.
+ * Enabling all interrupts here triggers spurious irqs as some of the interrupts
+ * associated with hw channels always get triggered.
+ */
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ bool mhi_reset;
+ u32 count = 0;
+ int ret;
+
+ /* Wait for Host to set the M0 state */
+ do {
+ msleep(M0_WAIT_DELAY_MS);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ /* Clear the MHI reset if host is in reset state */
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_info(dev, "Detected Host reset while waiting for M0\n");
+ }
+ count++;
+ } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
+
+ if (state != MHI_STATE_M0) {
+ dev_err(dev, "Host failed to enter M0\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache host config\n");
+ return ret;
+ }
+
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Enable all interrupts now */
+ mhi_ep_enable_int(mhi_cntrl);
+
+ return 0;
+}
+
+static void mhi_ep_cmd_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ int ret;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ return;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset)
+ return;
+
+ /*
+ * Process command ring elements up to the write offset. In case of an error, just
+ * try to process the next element.
+ */
+ while (ring->rd_offset != ring->wr_offset) {
+ el = &ring->ring_cache[ring->rd_offset];
+
+ ret = mhi_ep_process_cmd_ring(ring, el);
+ if (ret)
+ dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+
+ mhi_ep_ring_inc_index(ring);
+ }
+}
+
+static void mhi_ep_ch_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_item *itr, *tmp;
+ struct mhi_ring_element *el;
+ struct mhi_ep_ring *ring;
+ struct mhi_ep_chan *chan;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ /* Process each queued channel ring. In case of an error, just process next element. */
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ ring = itr->ring;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ kfree(itr);
+ continue;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset) {
+ kfree(itr);
+ continue;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ mutex_lock(&chan->lock);
+ dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ ret = mhi_ep_process_ch_ring(ring, el);
+ if (ret) {
+ dev_err(dev, "Error processing ring for channel (%u): %d\n",
+ ring->ch_id, ret);
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ }
+}
+
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_state_transition *itr, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling MHI state transition to %s\n",
+ mhi_state_str(itr->state));
+
+ switch (itr->state) {
+ case MHI_STATE_M0:
+ ret = mhi_ep_set_m0_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M0 state\n");
+ break;
+ case MHI_STATE_M3:
+ ret = mhi_ep_set_m3_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M3 state\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+ u32 ch_idx)
+{
+ struct mhi_ep_ring_item *item;
+ struct mhi_ep_ring *ring;
+ bool work = !!ch_int;
+ LIST_HEAD(head);
+ u32 i;
+
+ /* First add the ring items to a local list */
+ for_each_set_bit(i, &ch_int, 32) {
+ /* Channel index varies for each register: 0, 32, 64, 96 */
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->ring = ring;
+ list_add_tail(&item->node, &head);
+ }
+
+ /* Now, splice the local list into ch_db_list and queue the work item */
+ if (work) {
+ spin_lock(&mhi_cntrl->list_lock);
+ list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
+ }
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then
+ * check for the bits that are set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ch_int, ch_idx, i;
+
+ /* Bail out if there is no channel doorbell interrupt */
+ if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+ return;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+ if (ch_int) {
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+ struct mhi_ep_state_transition *item;
+
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value;
+ bool mhi_reset;
+
+ /* Acknowledge the ctrl interrupt */
+ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ dev_info(dev, "Host triggered MHI reset!\n");
+ disable_irq_nosync(mhi_cntrl->irq);
+ schedule_work(&mhi_cntrl->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_ring *ch_ring, *ev_ring;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int i;
+
+ /* Stop all the channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ flush_workqueue(mhi_cntrl->wq);
+
+ /* Destroy devices associated with all channels */
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+ /* Stop and reset the transfer rings */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ mutex_lock(&mhi_chan->lock);
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Stop and reset the event rings */
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ ev_ring = &mhi_cntrl->mhi_event[i].ring;
+ if (!ev_ring->started)
+ continue;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ mhi_ep_ring_reset(mhi_cntrl, ev_ring);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ }
+
+ /* Stop and reset the command ring */
+ mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+ mhi_ep_free_host_cfg(mhi_cntrl);
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_cntrl->enabled = false;
+}
+
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state cur_state;
+ int ret;
+
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will
+ * issue reset during shutdown also and we don't need to do re-init in
+ * that case.
+ */
+ if (cur_state == MHI_STATE_SYS_ERR) {
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ return;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+ return;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ }
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ return;
+
+ /* Signal host that the device went to SYS_ERR state */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ /*
+ * Mask all interrupts until the state machine is ready. Interrupts will
+ * be enabled later with mhi_ep_enable().
+ */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+ mhi_cntrl->mhi_state = MHI_STATE_RESET;
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ goto err_free_event;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint\n");
+ goto err_free_event;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ mhi_cntrl->enabled = true;
+
+ return 0;
+
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ if (mhi_cntrl->enabled)
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ mhi_dev->mhi_cntrl->mhi_dev = NULL;
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created in mhi_ep_create_device()
+ * if the mhi_dev associated with it is NULL.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_device_type dev_type)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ /* Controller device is always allocated first */
+ if (dev_type == MHI_DEVICE_CONTROLLER)
+ /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ else
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_type = dev_type;
+
+ return mhi_dev;
+}
+
+/*
+ * MHI channels are always defined in pairs with UL as the even-numbered
+ * channel and DL as the odd-numbered one. This function takes the UL channel
+ * (primary) as ch_id and always looks at the next entry in the channel list
+ * for the corresponding DL channel (secondary).
+ */
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ /* Check if the channel name is the same for both UL and DL */
+ if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
+ dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
+ mhi_chan->name, mhi_chan[1].name);
+ return -EINVAL;
+ }
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ /* Configure primary channel */
+ mhi_dev->ul_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ mhi_dev->dl_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Channel name is the same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mhi_ep_chan *ul_chan, *dl_chan;
+
+ if (dev->bus != &mhi_ep_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_ep_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy devices created for channels */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ put_device(&ul_chan->mhi_dev->dev);
+
+ if (dl_chan)
+ put_device(&dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+ int ret = -EINVAL;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * Allocate max_channels supported by the MHI endpoint and populate
+ * only the defined channels
+ */
+ mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
+ chan, mhi_cntrl->max_chan);
+ goto error_chan_cfg;
+ }
+
+ /* Bi-directional and directionless channels are not supported */
+ if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
+ dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
+ ch_cfg->dir, chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = mhi_ep_chan_init(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+ INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+ INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
+
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ if (!mhi_cntrl->wq) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+ spin_lock_init(&mhi_cntrl->state_lock);
+ spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Set controller index */
+ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_destroy_wq;
+
+ mhi_cntrl->index = ret;
+
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+ goto err_ida_free;
+ }
+
+ /* Allocate the controller device */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_free_irq;
+ }
+
+ dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_put_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_put_dev:
+ put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
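+/*
+ * Illustrative (hypothetical) removal path of a controller driver, showing
+ * the expected ordering described above:
+ *
+ *   mhi_ep_power_down(mhi_cntrl);
+ *   mhi_ep_unregister_controller(mhi_cntrl);
+ */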
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ destroy_workqueue(mhi_cntrl->wq);
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Disconnect the channels associated with the driver */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to the client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mhi_chan->xfer_cb = NULL;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Remove the client driver now */
+ mhi_drv->remove(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ /* Client drivers should have callbacks defined for both channels */
+ if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 000000000000..b5bfd22f2c8e
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
+{
+ return readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, offset);
+ regval &= ~mask;
+ regval |= (val << __ffs(mask)) & mask;
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(dev, offset);
+ regval &= mask;
+ regval >>= __ffs(mask);
+
+ return regval;
+}
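+
+/*
+ * Worked example for the masked helpers above (illustration only): with
+ * mask 0x0000ff00 and val 0x12, __ffs(mask) is 8, so the masked write
+ * replaces bits [15:8] of the register with 0x12 and leaves the remaining
+ * bits untouched. A masked read with the same mask returns those bits
+ * shifted back down, i.e. 0x12.
+ */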
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
+static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
+{
+ u32 chid_mask, chid_shift, chdb_idx, val;
+
+ chid_shift = ch_id % 32;
+ chid_mask = BIT(chid_shift);
+ chdb_idx = ch_id / 32;
+
+ val = enable ? 1 : 0;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
+
+ /* Update the local copy of the channel mask */
+ mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
+ mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
+}
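+
+/*
+ * Example (illustration only): ch_id 35 gives chdb_idx = 35 / 32 = 1 and
+ * chid_shift = 35 % 32 = 3, so the doorbell interrupt for channel 35 is
+ * controlled by bit 3 of MHI_CHDB_INT_MASK_n(1), and the same bit is
+ * mirrored in the local chdb[1].mask copy.
+ */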
+
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ bool chdb = false;
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
+ if (mhi_cntrl->chdb[i].status)
+ chdb = true;
+ }
+
+ /* Return whether a channel doorbell interrupt occurred or not */
+ return chdb;
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
+ MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
+ mhi_cntrl->ch_ctx_host_pa = regval;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
+ mhi_cntrl->ch_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
+ mhi_cntrl->ev_ctx_host_pa = regval;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
+ mhi_cntrl->ev_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
+ mhi_cntrl->cmd_ctx_host_pa = regval;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
+ mhi_cntrl->cmd_ctx_host_pa |= regval;
+}
+
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u64 db_offset;
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
+ db_offset = regval;
+ db_offset <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
+ db_offset |= regval;
+
+ return db_offset;
+}
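+
+/*
+ * Example (illustration only): if the HIGHER doorbell register reads
+ * 0x00000001 and the LOWER one reads 0x00002000, the returned doorbell
+ * value (the ring write pointer written by the host) is 0x0000000100002000.
+ */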
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
+ mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+}
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 000000000000..115518ec76a4
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+ return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
+}
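+
+/*
+ * Example (illustration only): assuming 16-byte ring elements, a ring with
+ * rbase = 0x8000 and ptr = 0x8040 maps to offset (0x8040 - 0x8000) / 16 = 4,
+ * i.e. the fifth element of the ring.
+ */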
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ __le64 rlen;
+
+ memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
+
+ return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+ ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
+}
+
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t start, copy_size;
+ int ret;
+
+ /* Don't proceed in the case of an event ring. This happens during mhi_ep_ring_start(). */
+ if (ring->type == RING_TYPE_ER)
+ return 0;
+
+ /* No need to cache the ring if the write pointer is unmodified */
+ if (ring->wr_offset == end)
+ return 0;
+
+ start = ring->wr_offset;
+ if (start < end) {
+ copy_size = (end - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+ } else {
+ copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+
+ if (end) {
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
+ &ring->ring_cache[0],
+ end * sizeof(struct mhi_ring_element));
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+
+ return 0;
+}
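+
+/*
+ * Worked example of the wrap-around case above (illustration only): with
+ * ring_size = 8, wr_offset (start) = 6 and end = 2, elements 6..7 are read
+ * from the host first, followed by elements 0..1 starting at the ring base.
+ */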
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+ size_t wr_offset;
+ int ret;
+
+ wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+ /* Cache the host ring up to the write offset */
+ ret = __mhi_ep_cache_ring(ring, wr_offset);
+ if (ret)
+ return ret;
+
+ ring->wr_offset = wr_offset;
+
+ return 0;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ u64 wr_ptr;
+
+ wr_ptr = mhi_ep_mmio_get_db(ring);
+
+ return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ __le64 rp;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+ else
+ num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+ /* Check if there is space in the ring for adding at least one element */
+ if (!num_free_elem) {
+ dev_err(dev, "No space left in the ring\n");
+ return -ENOSPC;
+ }
+
+ old_offset = ring->rd_offset;
+ mhi_ep_ring_inc_index(ring);
+
+ dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+ /* Update rp in ring context */
+ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
+ sizeof(*el));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
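+
+/*
+ * Example of the free-space check above (illustration only): with
+ * ring_size = 128, rd_offset = 5 and wr_offset = 10, num_free_elem is
+ * (10 - 5) - 1 = 4. As is conventional for ring buffers, one slot is left
+ * unused so that a full ring is not mistaken for an empty one.
+ */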
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->db_offset_h = EP_CRDB_HIGHER;
+ ring->db_offset_l = EP_CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ __le64 val;
+ int ret;
+
+ ring->mhi_cntrl = mhi_cntrl;
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
+ ring->rbase = le64_to_cpu(val);
+
+ if (ring->type == RING_TYPE_CH)
+ ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
+
+ if (ring->type == RING_TYPE_ER)
+ ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+
+ /* During ring init, both rp and wp are equal */
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
+ ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+ ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+
+ /* Allocate ring cache memory for holding a copy of the host ring */
+ ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
+ ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
+ if (ret) {
+ dev_err(dev, "Failed to cache ring\n");
+ kfree(ring->ring_cache);
+ return ret;
+ }
+
+ ring->started = true;
+
+ return 0;
+}
+
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+ ring->started = false;
+ kfree(ring->ring_cache);
+ ring->ring_cache = NULL;
+}
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 000000000000..3655c19e23c7
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state)
+{
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ return true; /* Allowed in any state */
+
+ if (mhi_state == MHI_STATE_READY)
+ return cur_mhi_state == MHI_STATE_RESET;
+
+ if (mhi_state == MHI_STATE_M0)
+ return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+ if (mhi_state == MHI_STATE_M3)
+ return cur_mhi_state == MHI_STATE_M0;
+
+ return false;
+}
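+
+/*
+ * Summary of the checks above (illustration only):
+ *
+ *   SYS_ERR : allowed from any state
+ *   READY   : allowed only from RESET
+ *   M0      : allowed from READY or M3
+ *   M3      : allowed only from M0
+ *
+ * Any other transition is rejected.
+ */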
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+ dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+ mhi_state_str(mhi_state),
+ mhi_state_str(mhi_cntrl->mhi_state));
+ return -EACCES;
+ }
+
+ /* TODO: Add support for M1 and M2 states */
+ if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+ dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+ return -EOPNOTSUPP;
+ }
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+ mhi_cntrl->mhi_state = mhi_state;
+
+ if (mhi_state == MHI_STATE_READY)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+ return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state old_state;
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event\n");
+ return ret;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+ /* Send AMSS EE event to host */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ mhi_ep_suspend_channels(mhi_cntrl);
+
+ /* Signal host that the device moved to M3 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state mhi_state;
+ int ret, is_ready;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Ensure that MHISTATUS is set to RESET by the host */
+ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return -EIO;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret)
+ mhi_ep_handle_syserr(mhi_cntrl);
+
+ return ret;
+}
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index b0da7ca4519c..26d0eddb1477 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -19,8 +19,8 @@
#include "internal.h"
/* Setup RDDM vector table for RDDM transfer and program RXVEC */
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
- struct image_info *img_info)
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
@@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 sequence_id;
unsigned int i;
+ int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
bhi_vec->dma_addr = mhi_buf->dma_addr;
@@ -45,11 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
- mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
- BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ if (ret) {
+ dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
+ return ret;
+ }
dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+
+ return 0;
}
/* Collect RDDM buffer during kernel panic */
@@ -198,10 +205,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
- BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
read_unlock_bh(pm_lock);
+ if (ret)
+ return ret;
+
/* Wait for the image download to complete */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index a665b8e92408..c137d55ccfa0 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -86,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev,
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
+ return sysfs_emit(buf, "Serial Number: %u\n",
mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
@@ -100,17 +100,30 @@ static ssize_t oem_pk_hash_show(struct device *dev,
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
- cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
- "OEMPKHASH[%d]: 0x%x\n", i,
- mhi_cntrl->oem_pk_hash[i]);
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
+ i, mhi_cntrl->oem_pk_hash[i]);
return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);
+static ssize_t soc_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_soc_reset(mhi_cntrl);
+ return count;
+}
+static DEVICE_ATTR_WO(soc_reset);
+
static struct attribute *mhi_dev_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_oem_pk_hash.attr,
+ &dev_attr_soc_reset.attr,
NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
@@ -425,74 +438,65 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct {
u32 offset;
- u32 mask;
u32 val;
} reg_info[] = {
{
- CCABAP_HIGHER, U32_MAX,
+ CCABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
- CCABAP_LOWER, U32_MAX,
+ CCABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
- ECABAP_HIGHER, U32_MAX,
+ ECABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
- ECABAP_LOWER, U32_MAX,
+ ECABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
- CRCBAP_HIGHER, U32_MAX,
+ CRCBAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
- CRCBAP_LOWER, U32_MAX,
+ CRCBAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
- MHICFG, MHICFG_NER_MASK,
- mhi_cntrl->total_ev_rings,
- },
- {
- MHICFG, MHICFG_NHWER_MASK,
- mhi_cntrl->hw_ev_rings,
- },
- {
- MHICTRLBASE_HIGHER, U32_MAX,
+ MHICTRLBASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
- MHICTRLBASE_LOWER, U32_MAX,
+ MHICTRLBASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
- MHIDATABASE_HIGHER, U32_MAX,
+ MHIDATABASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
- MHIDATABASE_LOWER, U32_MAX,
+ MHIDATABASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
- MHICTRLLIMIT_HIGHER, U32_MAX,
+ MHICTRLLIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
- MHICTRLLIMIT_LOWER, U32_MAX,
+ MHICTRLLIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
{
- MHIDATALIMIT_HIGHER, U32_MAX,
+ MHIDATALIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
- MHIDATALIMIT_LOWER, U32_MAX,
+ MHIDATALIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
- { 0, 0, 0 }
+ {0, 0}
};
dev_dbg(dev, "Initializing MHI registers\n");
@@ -534,8 +538,22 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
/* Write to MMIO registers */
for (i = 0; reg_info[i].offset; i++)
- mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
- reg_info[i].mask, reg_info[i].val);
+ mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].val);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
+ mhi_cntrl->total_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
+ mhi_cntrl->hw_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
return 0;
}
@@ -1103,8 +1121,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
*/
mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
mhi_cntrl->rddm_size);
- if (mhi_cntrl->rddm_image)
- mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ if (mhi_cntrl->rddm_image) {
+ ret = mhi_rddm_prepare(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ if (ret) {
+ mhi_free_bhie_table(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ goto error_reg_offset;
+ }
+ }
}
mutex_unlock(&mhi_cntrl->pm_mutex);
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index b47d8ef2624a..01fd10a399b6 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -324,8 +324,9 @@ int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
u32 val, u32 delayus);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
- u32 offset, u32 mask, u32 val);
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
dma_addr_t db_val);
@@ -339,7 +340,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 9021be7f2359..f3aef77a6a4a 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -65,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
- u32 offset, u32 mask, u32 val)
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val)
{
int ret;
u32 tmp;
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
if (ret)
- return;
+ return ret;
tmp &= ~mask;
tmp |= (val << __ffs(mask));
mhi_write_reg(mhi_cntrl, base, offset, tmp);
+
+ return 0;
}
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
@@ -531,18 +534,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
- dma_addr_t ctxt_wp;
-
/* Update the WP */
ring->wp += ring->el_size;
- ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
- if (ring->wp >= (ring->base + ring->len)) {
+ if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
- ctxt_wp = ring->iommu_base;
- }
- *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
/* Update the RP */
ring->rp += ring->el_size;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 541ced27d941..841626727f6b 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -371,7 +371,16 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.sideband_wake = false,
};
-static const struct mhi_channel_config mhi_mv31_channels[] = {
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+ .name = "foxconn-sdx65",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
/* MBIM Control Channel */
@@ -382,25 +391,33 @@ static const struct mhi_channel_config mhi_mv31_channels[] = {
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};
-static struct mhi_event_config mhi_mv31_events[] = {
+static struct mhi_event_config mhi_mv3x_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 256),
MHI_EVENT_CONFIG_DATA(1, 256),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};
-static const struct mhi_controller_config modem_mv31_config = {
+static const struct mhi_controller_config modem_mv3x_config = {
.max_channels = 128,
.timeout_ms = 20000,
- .num_channels = ARRAY_SIZE(mhi_mv31_channels),
- .ch_cfg = mhi_mv31_channels,
- .num_events = ARRAY_SIZE(mhi_mv31_events),
- .event_cfg = mhi_mv31_events,
+ .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
+ .ch_cfg = mhi_mv3x_channels,
+ .num_events = ARRAY_SIZE(mhi_mv3x_events),
+ .event_cfg = mhi_mv3x_events,
};
static const struct mhi_pci_dev_info mhi_mv31_info = {
.name = "cinterion-mv31",
- .config = &modem_mv31_config,
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_mv32_info = {
+ .name = "cinterion-mv32",
+ .config = &modem_mv3x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
@@ -446,20 +463,100 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.sideband_wake = false,
};
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+};
+
+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
+ .ch_cfg = mhi_telit_fn980_hw_v1_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
+ .event_cfg = mhi_telit_fn980_hw_v1_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
+ .name = "telit-fn980-hwv1",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_telit_fn980_hw_v1_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_telit_fn990_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_telit_fn990_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
+ .ch_cfg = mhi_telit_fn990_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+ .name = "telit-fn990",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+/* Keep the list sorted by PID. Entries with a new VID should be added at the end */
static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* Telit FN980 hardware revision v1 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+ /* Telit FN990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
@@ -472,9 +569,21 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W368 (sdx65) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W373 (sdx62) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV32-WA (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00ba),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ /* MV32-WB (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00bb),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 3d90b8ecd3d9..dc2e8ff3bff2 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -129,13 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
if (state == MHI_STATE_RESET) {
- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 1);
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 1);
} else {
- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_MHISTATE_MASK, state);
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK, state);
}
+
+ if (ret)
+ dev_err(dev, "Failed to set MHI state to: %s\n",
+ mhi_state_str(state));
}
/* NOP for backward compatibility, host allowed to ring DB in M2 state */
@@ -476,6 +483,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
* hence re-program it
*/
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+ if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ /* wait for ready to be set */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+ MHISTATUS,
+ MHISTATUS_READY_MASK, 1, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to enter READY state\n");
+ }
}
dev_dbg(dev,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 18363aa2a49d..9a7d12332fad 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3395,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
- cancel_delayed_work_sync(&ddata->idle_work);
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 55f48375e3fe..69fd31ffb847 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -18,7 +18,8 @@ config TTY_PRINTK
The feature is useful to inline user messages with kernel
messages.
In order to use this feature, you should output user messages
- to /dev/ttyprintk or redirect console to this TTY.
+ to /dev/ttyprintk or redirect console to this TTY, or boot
+ the kernel with console=ttyprintk.
If unsure, say N.
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index cc296f0823bd..84ca98ed1dad 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -101,7 +101,7 @@ static inline bool should_stop_iteration(void)
{
if (need_resched())
cond_resched();
- return fatal_signal_pending(current);
+ return signal_pending(current);
}
/*
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ca5141ed5ef3..cba19bfdc44d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -100,17 +100,18 @@ static const struct seq_operations misc_seq_ops = {
static int misc_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
- struct miscdevice *c;
+ struct miscdevice *c = NULL, *iter;
int err = -ENODEV;
const struct file_operations *new_fops = NULL;
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops) {
@@ -118,11 +119,12 @@ static int misc_open(struct inode *inode, struct file *file)
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops)
goto fail;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 78baba55a8b5..8fc49b038372 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -922,7 +922,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
// BIT7:parity error
// BIT6:framing error
- if (status & (BIT7 + BIT6)) {
+ if (status & (BIT7 | BIT6)) {
if (status & BIT7)
icount->parity++;
else
@@ -1418,7 +1418,11 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
-
+ if ((cflag & CSIZE) != CS8) {
+ cflag &= ~CSIZE;
+ cflag |= CS7;
+ tty->termios.c_cflag = cflag;
+ }
info->params.data_bits = tty_get_char_size(cflag);
if (cflag & CSTOPB)
@@ -1432,10 +1436,8 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.parity = ASYNC_PARITY_ODD;
else
info->params.parity = ASYNC_PARITY_EVEN;
-#ifdef CMSPAR
if (cflag & CMSPAR)
info->params.parity = ASYNC_PARITY_SPACE;
-#endif
}
/* calculate number of jiffies to transmit a full
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index adf941c47506..ed45d04905c2 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -11,6 +11,7 @@
* of the boot process, for example.
*/
+#include <linux/console.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -163,6 +164,18 @@ static const struct tty_port_operations tpk_port_ops = {
static struct tty_driver *ttyprintk_driver;
+static struct tty_driver *ttyprintk_console_device(struct console *c,
+ int *index)
+{
+ *index = 0;
+ return ttyprintk_driver;
+}
+
+static struct console ttyprintk_console = {
+ .name = "ttyprintk",
+ .device = ttyprintk_console_device,
+};
+
static int __init ttyprintk_init(void)
{
int ret;
@@ -195,6 +208,8 @@ static int __init ttyprintk_init(void)
goto error;
}
+ register_console(&ttyprintk_console);
+
return 0;
error:
@@ -205,6 +220,7 @@ error:
static void __exit ttyprintk_exit(void)
{
+ unregister_console(&ttyprintk_console);
tty_unregister_driver(ttyprintk_driver);
tty_driver_kref_put(ttyprintk_driver);
tty_port_destroy(&tpk_port.port);
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
index 5046486011c8..0f238648dcfe 100644
--- a/drivers/char/xillybus/xillybus_class.c
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -174,18 +174,17 @@ void xillybus_cleanup_chrdev(void *private_data,
struct device *dev)
{
int minor;
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->private_data == private_data) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->private_data == private_data) {
+ unit = iter;
break;
}
- if (!found) {
+ if (!unit) {
dev_err(dev, "Weird bug: Failed to find unit\n");
mutex_unlock(&unit_mutex);
return;
@@ -216,22 +215,21 @@ int xillybus_find_inode(struct inode *inode,
{
int minor = iminor(inode);
int major = imajor(inode);
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->major == major &&
- minor >= unit->lowest_minor &&
- minor < (unit->lowest_minor + unit->num_nodes)) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->major == major &&
+ minor >= iter->lowest_minor &&
+ minor < (iter->lowest_minor + iter->num_nodes)) {
+ unit = iter;
break;
}
mutex_unlock(&unit_mutex);
- if (!found)
+ if (!unit)
return -ENODEV;
*private_data = unit->private_data;
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index dc3551796e5e..39bcbfd908b4 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
if (xdev->workq)
destroy_workqueue(xdev->workq);
+ usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index 5b022eeb838b..c56e406138db 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -683,7 +683,12 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
return ERR_PTR(ret);
}
- pdev->driver_override = "imx-scu-clk";
+ ret = driver_set_override(&pdev->dev, &pdev->driver_override,
+ "imx-scu-clk", strlen("imx-scu-clk"));
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
if (ret)
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index cfc79f942b07..03de634efc52 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -94,7 +95,8 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
clk_register_clkdev(clk, con_id, dev_id);
}
-int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
+int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
+ int nb_clks, void __iomem *clk_regs)
{
int i;
struct pxa_clk *pxa_clk;
@@ -106,6 +108,7 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
pxa_clk->gate = clks[i].gate;
+ pxa_clk->gate.reg = clk_regs + clks[i].cken_reg;
pxa_clk->gate.lock = &pxa_clk_lock;
clk = clk_register_composite(NULL, clks[i].name,
clks[i].parent_names, 2,
@@ -150,12 +153,13 @@ void pxa2xx_core_turbo_switch(bool on)
}
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
- u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ u32 (*mdrefr_dri)(unsigned int),
void __iomem *cccr)
{
unsigned int clkcfg = freq->clkcfg;
unsigned int unused, preset_mdrefr, postset_mdrefr;
unsigned long flags;
+ void __iomem *mdrefr = pxa_smemc_get_mdrefr();
local_irq_save(flags);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 5768e0f728ce..7ec2d2821d8f 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -105,6 +105,7 @@
struct desc_clk_cken {
struct clk_hw hw;
int ckid;
+ int cken_reg;
const char *name;
const char *dev_id;
const char *con_id;
@@ -119,11 +120,12 @@ struct desc_clk_cken {
#define PXA_CKEN(_dev_id, _con_id, _name, parents, _mult_lp, _div_lp, \
_mult_hp, _div_hp, is_lp, _cken_reg, _cken_bit, flag) \
{ .ckid = CLK_ ## _name, .name = #_name, \
+ .cken_reg = _cken_reg, \
.dev_id = _dev_id, .con_id = _con_id, .parent_names = parents,\
.lp = { .mult = _mult_lp, .div = _div_lp }, \
.hp = { .mult = _mult_hp, .div = _div_hp }, \
.is_in_low_power = is_lp, \
- .gate = { .reg = (void __iomem *)_cken_reg, .bit_idx = _cken_bit }, \
+ .gate = { .bit_idx = _cken_bit }, \
.flags = flag, \
}
#define PXA_CKEN_1RATE(dev_id, con_id, name, parents, cken_reg, \
@@ -146,12 +148,13 @@ static inline int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
extern void clkdev_pxa_register(int ckid, const char *con_id,
const char *dev_id, struct clk *clk);
-extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
+extern int clk_pxa_cken_init(const struct desc_clk_cken *clks,
+ int nb_clks, void __iomem *clk_regs);
void clk_pxa_dt_common_init(struct device_node *np);
void pxa2xx_core_turbo_switch(bool on);
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
- u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ u32 (*mdrefr_dri)(unsigned int),
void __iomem *cccr);
int pxa2xx_determine_rate(struct clk_rate_request *req,
struct pxa2xx_freq *freqs, int nb_freqs);
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index d0f957996acb..93d5907b8530 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -14,11 +14,11 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
+#include "clk-pxa2xx.h"
#define KHz 1000
#define MHz (1000 * 1000)
@@ -33,15 +33,13 @@ enum {
((T) ? CLKCFG_TURBO : 0))
#define PXA25x_CCCR(N2, M, L) (N2 << 7 | M << 5 | L)
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
/*
* Various clock factors driven by the CCCR register.
*/
+static void __iomem *clk_regs;
/* Crystal Frequency to Memory Frequency Multiplier (L) */
static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
@@ -57,30 +55,9 @@ static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory"
};
-static int get_sdram_rows(void)
-{
- static int sdram_rows;
- unsigned int drac2 = 0, drac0 = 0;
- u32 mdcnfg;
-
- if (sdram_rows)
- return sdram_rows;
-
- mdcnfg = readl_relaxed(MDCNFG);
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
- return sdram_rows;
-}
-
static u32 mdrefr_dri(unsigned int freq_khz)
{
- u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+ u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
return interval / 32;
}
@@ -121,7 +98,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = readl(CCCR);
+ unsigned long cccr = readl(clk_regs + CCCR);
unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
return parent_rate / m;
@@ -225,7 +202,7 @@ MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = readl(CCCR);
+ unsigned long cccr = readl(clk_regs + CCCR);
unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
return (parent_rate / n2) * 2;
@@ -236,7 +213,7 @@ RATE_RO_OPS(clk_pxa25x_run, "run");
static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long clkcfg, cccr = readl(CCCR);
+ unsigned long clkcfg, cccr = readl(clk_regs + CCCR);
unsigned int l, m, n2, t;
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -268,7 +245,7 @@ static int clk_pxa25x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= ARRAY_SIZE(pxa25x_freqs))
return -EINVAL;
- pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, clk_regs + CCCR);
return 0;
}
@@ -345,16 +322,17 @@ static void __init pxa25x_dummy_clocks_init(void)
}
}
-int __init pxa25x_clocks_init(void)
+int __init pxa25x_clocks_init(void __iomem *regs)
{
+ clk_regs = regs;
pxa25x_base_clocks_init();
pxa25x_dummy_clocks_init();
- return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+ return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks), clk_regs);
}
static void __init pxa25x_dt_clocks_init(struct device_node *np)
{
- pxa25x_clocks_init();
+ pxa25x_clocks_init(ioremap(0x41300000ul, 0x10));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 7b123105b5de..116c6ac666e3 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -7,16 +7,15 @@
* Heavily inspired from former arch/arm/mach-pxa/clock.c.
*/
#include <linux/clk-provider.h>
-#include <mach/pxa2xx-regs.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/of.h>
-
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
+#include "clk-pxa2xx.h"
#define KHz 1000
#define MHz (1000 * 1000)
@@ -50,41 +49,19 @@ enum {
((T) ? CLKCFG_TURBO : 0))
#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+static void __iomem *clk_regs;
+
static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory",
"system_bus"
};
-static int get_sdram_rows(void)
-{
- static int sdram_rows;
- unsigned int drac2 = 0, drac0 = 0;
- u32 mdcnfg;
-
- if (sdram_rows)
- return sdram_rows;
-
- mdcnfg = readl_relaxed(MDCNFG);
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
- return sdram_rows;
-}
-
static u32 mdrefr_dri(unsigned int freq_khz)
{
- u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+ u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
return (interval - 31) / 32;
}
@@ -124,7 +101,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
bool pxa27x_is_ppll_disabled(void)
{
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
return ccsr & (1 << CCCR_PPDIS_BIT);
}
@@ -226,7 +203,7 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long clkcfg;
unsigned int t, ht;
unsigned int l, L, n2, N;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
@@ -260,7 +237,7 @@ static int clk_pxa27x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= ARRAY_SIZE(pxa27x_freqs))
return -EINVAL;
- pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, clk_regs + CCCR);
return 0;
}
@@ -271,8 +248,8 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int l, osc_forced;
- unsigned long ccsr = readl(CCSR);
- unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
l = ccsr & CCSR_L_MASK;
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -293,7 +270,7 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -322,7 +299,7 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
unsigned int t, ht, osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -359,7 +336,7 @@ MUX_OPS(clk_pxa27x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
return (parent_rate / n2) * 2;
@@ -382,7 +359,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int b, osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -399,7 +376,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -415,8 +392,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int a, l, osc_forced;
- unsigned long cccr = readl(CCCR);
- unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -434,8 +411,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced, a;
- unsigned long cccr = readl(CCCR);
- unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -490,16 +467,17 @@ static void __init pxa27x_base_clocks_init(void)
clk_register_clk_pxa27x_lcd_base();
}
-int __init pxa27x_clocks_init(void)
+int __init pxa27x_clocks_init(void __iomem *regs)
{
+ clk_regs = regs;
pxa27x_base_clocks_init();
pxa27x_dummy_clocks_init();
- return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
+ return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks), regs);
}
static void __init pxa27x_dt_clocks_init(struct device_node *np)
{
- pxa27x_clocks_init();
+ pxa27x_clocks_init(ioremap(0x41300000ul, 0x10));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa2xx.h b/drivers/clk/pxa/clk-pxa2xx.h
new file mode 100644
index 000000000000..94b03d0e32ff
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa2xx.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __CLK_PXA2XX_H
+#define __CLK_PXA2XX_H
+
+#define CCCR (0x0000) /* Core Clock Configuration Register */
+#define CCSR (0x000C) /* Core Clock Status Register */
+#define CKEN (0x0004) /* Clock Enable Register */
+#define OSCC (0x0008) /* Oscillator Configuration Register */
+
+#define CCCR_N_MASK 0x0380 /* Run Mode Frequency to Turbo Mode Frequency Multiplier */
+#define CCCR_M_MASK 0x0060 /* Memory Frequency to Run Mode Frequency Multiplier */
+#define CCCR_L_MASK 0x001f /* Crystal Frequency to Memory Frequency Multiplier */
+
+#define CCCR_CPDIS_BIT (31)
+#define CCCR_PPDIS_BIT (30)
+#define CCCR_LCD_26_BIT (27)
+#define CCCR_A_BIT (25)
+
+#define CCSR_N2_MASK CCCR_N_MASK
+#define CCSR_M_MASK CCCR_M_MASK
+#define CCSR_L_MASK CCCR_L_MASK
+#define CCSR_N2_SHIFT 7
+
+#define CKEN_AC97CONF (31) /* AC97 Controller Configuration */
+#define CKEN_CAMERA (24) /* Camera Interface Clock Enable */
+#define CKEN_SSP1 (23) /* SSP1 Unit Clock Enable */
+#define CKEN_MEMC (22) /* Memory Controller Clock Enable */
+#define CKEN_MEMSTK (21) /* Memory Stick Host Controller */
+#define CKEN_IM (20) /* Internal Memory Clock Enable */
+#define CKEN_KEYPAD (19) /* Keypad Interface Clock Enable */
+#define CKEN_USIM (18) /* USIM Unit Clock Enable */
+#define CKEN_MSL (17) /* MSL Unit Clock Enable */
+#define CKEN_LCD (16) /* LCD Unit Clock Enable */
+#define CKEN_PWRI2C (15) /* PWR I2C Unit Clock Enable */
+#define CKEN_I2C (14) /* I2C Unit Clock Enable */
+#define CKEN_FICP (13) /* FICP Unit Clock Enable */
+#define CKEN_MMC (12) /* MMC Unit Clock Enable */
+#define CKEN_USB (11) /* USB Unit Clock Enable */
+#define CKEN_ASSP (10) /* ASSP (SSP3) Clock Enable */
+#define CKEN_USBHOST (10) /* USB Host Unit Clock Enable */
+#define CKEN_OSTIMER (9) /* OS Timer Unit Clock Enable */
+#define CKEN_NSSP (9) /* NSSP (SSP2) Clock Enable */
+#define CKEN_I2S (8) /* I2S Unit Clock Enable */
+#define CKEN_BTUART (7) /* BTUART Unit Clock Enable */
+#define CKEN_FFUART (6) /* FFUART Unit Clock Enable */
+#define CKEN_STUART (5) /* STUART Unit Clock Enable */
+#define CKEN_HWUART (4) /* HWUART Unit Clock Enable */
+#define CKEN_SSP3 (4) /* SSP3 Unit Clock Enable */
+#define CKEN_SSP (3) /* SSP Unit Clock Enable */
+#define CKEN_SSP2 (3) /* SSP2 Unit Clock Enable */
+#define CKEN_AC97 (2) /* AC97 Unit Clock Enable */
+#define CKEN_PWM1 (1) /* PWM1 Clock Enable */
+#define CKEN_PWM0 (0) /* PWM0 Clock Enable */
+
+#define OSCC_OON (1 << 1) /* 32.768kHz OON (write-once only bit) */
+#define OSCC_OOK (1 << 0) /* 32.768kHz OOK (read-only bit) */
+
+#endif
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 60db92772e72..42958a542662 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -14,8 +14,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
-#include <mach/smemc.h>
-#include <mach/pxa3xx-regs.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
+#include <linux/clk/pxa.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -23,6 +24,84 @@
#define KHz 1000
#define MHz (1000 * 1000)
+#define ACCR (0x0000) /* Application Subsystem Clock Configuration Register */
+#define ACSR (0x0004) /* Application Subsystem Clock Status Register */
+#define AICSR (0x0008) /* Application Subsystem Interrupt Control/Status Register */
+#define CKENA (0x000C) /* A Clock Enable Register */
+#define CKENB (0x0010) /* B Clock Enable Register */
+#define CKENC (0x0024) /* C Clock Enable Register */
+#define AC97_DIV (0x0014) /* AC97 clock divisor value register */
+
+#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
+#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
+#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
+#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
+#define ACCR_HSS(x) (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
+#define ACCR_XN(x) (((x) & 0x7) << 8)
+#define ACCR_XL(x) ((x) & 0x1f)
+
+/*
+ * Clock Enable Bit
+ */
+#define CKEN_LCD 1 /* < LCD Clock Enable */
+#define CKEN_USBH 2 /* < USB host clock enable */
+#define CKEN_CAMERA 3 /* < Camera interface clock enable */
+#define CKEN_NAND 4 /* < NAND Flash Controller Clock Enable */
+#define CKEN_USB2 6 /* < USB 2.0 client clock enable. */
+#define CKEN_DMC 8 /* < Dynamic Memory Controller clock enable */
+#define CKEN_SMC 9 /* < Static Memory Controller clock enable */
+#define CKEN_ISC 10 /* < Internal SRAM Controller clock enable */
+#define CKEN_BOOT 11 /* < Boot rom clock enable */
+#define CKEN_MMC1 12 /* < MMC1 Clock enable */
+#define CKEN_MMC2 13 /* < MMC2 clock enable */
+#define CKEN_KEYPAD 14 /* < Keypad Controller Clock Enable */
+#define CKEN_CIR 15 /* < Consumer IR Clock Enable */
+#define CKEN_USIM0 17 /* < USIM[0] Clock Enable */
+#define CKEN_USIM1 18 /* < USIM[1] Clock Enable */
+#define CKEN_TPM 19 /* < TPM clock enable */
+#define CKEN_UDC 20 /* < UDC clock enable */
+#define CKEN_BTUART 21 /* < BTUART clock enable */
+#define CKEN_FFUART 22 /* < FFUART clock enable */
+#define CKEN_STUART 23 /* < STUART clock enable */
+#define CKEN_AC97 24 /* < AC97 clock enable */
+#define CKEN_TOUCH 25 /* < Touch screen Interface Clock Enable */
+#define CKEN_SSP1 26 /* < SSP1 clock enable */
+#define CKEN_SSP2 27 /* < SSP2 clock enable */
+#define CKEN_SSP3 28 /* < SSP3 clock enable */
+#define CKEN_SSP4 29 /* < SSP4 clock enable */
+#define CKEN_MSL0 30 /* < MSL0 clock enable */
+#define CKEN_PWM0 32 /* < PWM[0] clock enable */
+#define CKEN_PWM1 33 /* < PWM[1] clock enable */
+#define CKEN_I2C 36 /* < I2C clock enable */
+#define CKEN_INTC 38 /* < Interrupt controller clock enable */
+#define CKEN_GPIO 39 /* < GPIO clock enable */
+#define CKEN_1WIRE 40 /* < 1-wire clock enable */
+#define CKEN_HSIO2 41 /* < HSIO2 clock enable */
+#define CKEN_MINI_IM 48 /* < Mini-IM */
+#define CKEN_MINI_LCD 49 /* < Mini LCD */
+
+#define CKEN_MMC3 5 /* < MMC3 Clock Enable */
+#define CKEN_MVED 43 /* < MVED clock enable */
+
+/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
+#define CKEN_PXA300_GCU 42 /* Graphics controller clock enable */
+#define CKEN_PXA320_GCU 7 /* Graphics controller clock enable */
+
+
enum {
PXA_CORE_60Mhz = 0,
PXA_CORE_RUN,
@@ -39,12 +118,12 @@ static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
/* crystal frequency to static memory controller multiplier (SMCFS) */
static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
-static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
-
static const char * const get_freq_khz[] = {
"core", "ring_osc_60mhz", "run", "cpll", "system_bus"
};
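+/* Clock controller register block (ACCR/ACSR/CKENA/CKENB/CKENC/AC97_DIV), ioremapped by pxa3xx_clocks_init() */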
+static void __iomem *clk_regs;
+
/*
* Get the clock frequency as reflected by ACSR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -78,12 +157,27 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
return (unsigned int)clks[0] / KHz;
}
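+/*
+ * Update ACCR on behalf of the cpufreq driver: clear the 'disable' bits,
+ * set the 'enable' bits, optionally kick a frequency change through the
+ * XCLKCFG coprocessor register, then poll ACSR until the masked bits match.
+ */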
+void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
+{
+ u32 accr = readl(clk_regs + ACCR);
+
+ accr &= ~disable;
+ accr |= enable;
+
+ writel(accr, clk_regs + ACCR);
+ if (xclkcfg)
+ __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+
+ while ((readl(clk_regs + ACSR) & mask) != (accr & mask))
+ cpu_relax();
+}
+
static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned long ac97_div, rate;
- ac97_div = AC97_DIV;
+ ac97_div = readl(clk_regs + AC97_DIV);
/* This may lose precision for some rates but won't for the
* standard 24.576MHz.
@@ -100,18 +194,18 @@ RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
- unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+ unsigned long acsr = readl(clk_regs + ACSR);
return (parent_rate / 48) * smcfs_mult[(acsr >> 23) & 0x7] /
- df_clkdiv[(memclkcfg >> 16) & 0x3];
+ pxa3xx_smemc_get_memclkdiv();
+
}
PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
static bool pxa3xx_is_ring_osc_forced(void)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
return acsr & ACCR_D0CS;
}
@@ -123,7 +217,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? CKENB : CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
@@ -191,7 +285,7 @@ static struct desc_clk_cken pxa93x_clocks[] __initdata = {
static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int hss = (acsr >> 14) & 0x3;
if (pxa3xx_is_ring_osc_forced())
@@ -238,7 +332,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
unsigned int t, xclkcfg;
@@ -254,7 +348,7 @@ RATE_RO_OPS(clk_pxa3xx_run, "run");
static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
unsigned int xl = acsr & ACCR_XL_MASK;
unsigned int t, xclkcfg;
@@ -325,7 +419,7 @@ static void __init pxa3xx_dummy_clocks_init(void)
}
}
-static void __init pxa3xx_base_clocks_init(void)
+static void __init pxa3xx_base_clocks_init(void __iomem *oscc_reg)
{
struct clk *clk;
@@ -335,34 +429,35 @@ static void __init pxa3xx_base_clocks_init(void)
clk_register_clk_pxa3xx_ac97();
clk_register_clk_pxa3xx_smemc();
clk = clk_register_gate(NULL, "CLK_POUT",
- "osc_13mhz", 0, OSCC, 11, 0, NULL);
+ "osc_13mhz", 0, oscc_reg, 11, 0, NULL);
clk_register_clkdev(clk, "CLK_POUT", NULL);
clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
clk_register_fixed_factor(NULL, "os-timer0",
"osc_13mhz", 0, 1, 4));
}
-int __init pxa3xx_clocks_init(void)
+int __init pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg)
{
int ret;
- pxa3xx_base_clocks_init();
+ clk_regs = regs;
+ pxa3xx_base_clocks_init(oscc_reg);
pxa3xx_dummy_clocks_init();
- ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+ ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks), regs);
if (ret)
return ret;
if (cpu_is_pxa320())
return clk_pxa_cken_init(pxa320_clocks,
- ARRAY_SIZE(pxa320_clocks));
+ ARRAY_SIZE(pxa320_clocks), regs);
if (cpu_is_pxa300() || cpu_is_pxa310())
return clk_pxa_cken_init(pxa300_310_clocks,
- ARRAY_SIZE(pxa300_310_clocks));
- return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+ ARRAY_SIZE(pxa300_310_clocks), regs);
+ return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks), regs);
}
static void __init pxa3xx_dt_clocks_init(struct device_node *np)
{
- pxa3xx_clocks_init();
+ pxa3xx_clocks_init(ioremap(0x41340000, 0x10), ioremap(0x41350000, 4));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fe3f05dfafd9..3c0ee102fe73 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -80,7 +80,7 @@ config IXP4XX_TIMER
bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select CLKSRC_MMIO
- select TIMER_OF if OF
+ select TIMER_OF
help
Enables support for the Intel XScale IXP4xx SoC timer.
@@ -597,6 +597,14 @@ config CLKSRC_ST_LPC
Enable this option to use the Low Power controller timer
as clocksource.
+config GXP_TIMER
+ bool "GXP timer driver" if COMPILE_TEST && !ARCH_HPE
+ default ARCH_HPE
+ select TIMER_OF if OF
+ help
+ Provides a driver for the timer control found on HPE
+ GXP SOCs. This is required for all GXP SOCs.
+
config RISCV_TIMER
bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 833cfb7a96c1..6ca640019e10 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -86,3 +86,4 @@ obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o
obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
+obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index a50ab5c2154f..39f172d7e29e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2012 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 Broadcom Corporation
#include <linux/init.h>
#include <linux/irq.h>
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 5d3d88e0fc8c..a4a991101fa3 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* J-Core SoC PIT/clocksource driver
*
* Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index be4175f415ba..b3ae38f36720 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -1,10 +1,5 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
#define pr_fmt(fmt) "mips-gic-timer: " fmt
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index e3acc3c631b7..6ec565d6939a 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 370/XP SoC timer handling.
*
@@ -7,10 +8,6 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
*
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index 1e984a4d8ad0..559aa96089c3 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Conexant Digicolor timer driver
*
@@ -11,10 +12,6 @@
* Copyright (C) 2013 Maxime Ripard
*
* Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
new file mode 100644
index 000000000000..8b38b3212388
--- /dev/null
+++ b/drivers/clocksource/timer-gxp.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/sched_clock.h>
+
+#define TIMER0_FREQ 1000000
+#define GXP_TIMER_CNT_OFS 0x00
+#define GXP_TIMESTAMP_OFS 0x08
+#define GXP_TIMER_CTRL_OFS 0x14
+
+/* TCS Stands for Timer Control/Status: these are masks to be used in */
+/* the Timer Count Registers */
+#define MASK_TCS_ENABLE 0x01
+#define MASK_TCS_PERIOD 0x02
+#define MASK_TCS_RELOAD 0x04
+#define MASK_TCS_TC 0x80
+
+struct gxp_timer {
+ void __iomem *counter;
+ void __iomem *control;
+ struct clock_event_device evt;
+};
+
+static struct gxp_timer *gxp_timer;
+
+static void __iomem *system_clock __ro_after_init;
+
+static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
+{
+ return container_of(evt_dev, struct gxp_timer, evt);
+}
+
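+/* Read the free-running TIMESTAMP register, used as both the clocksource and sched_clock */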
+static u64 notrace gxp_sched_read(void)
+{
+ return readl_relaxed(system_clock);
+}
+
+static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
+{
+ struct gxp_timer *timer = to_gxp_timer(evt_dev);
+
+ /* Stop counting and disable interrupt before updating */
+ writeb_relaxed(MASK_TCS_TC, timer->control);
+ writel_relaxed(event, timer->counter);
+ writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);
+
+ return 0;
+}
+
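+/* The IRQ line is shared with the watchdog (IRQF_SHARED); only handle it when our TC bit is set */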
+static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
+{
+ struct gxp_timer *timer = (struct gxp_timer *)dev_id;
+
+ if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
+ return IRQ_NONE;
+
+ writeb_relaxed(MASK_TCS_TC, timer->control);
+
+ timer->evt.event_handler(&timer->evt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init gxp_timer_init(struct device_node *node)
+{
+ void __iomem *base;
+ struct clk *clk;
+ u32 freq;
+ int ret, irq;
+
+ gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
+ if (!gxp_timer) {
+ ret = -ENOMEM;
+ pr_err("Can't allocate gxp_timer");
+ return ret;
+ }
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ ret = (int)PTR_ERR(clk);
+ pr_err("%pOFn clock not found: %d\n", node, ret);
+ goto err_free;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("%pOFn clock enable failed: %d\n", node, ret);
+ goto err_clk_enable;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ ret = -ENXIO;
+ pr_err("Can't map timer base registers");
+ goto err_iomap;
+ }
+
+ /* Set the offsets to the clock register and timer registers */
+ gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
+ gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
+ system_clock = base + GXP_TIMESTAMP_OFS;
+
+ gxp_timer->evt.name = node->name;
+ gxp_timer->evt.rating = 300;
+ gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
+ gxp_timer->evt.set_next_event = gxp_time_set_next_event;
+ gxp_timer->evt.cpumask = cpumask_of(0);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ pr_err("GXP Timer Can't parse IRQ %d", irq);
+ goto err_exit;
+ }
+
+ freq = clk_get_rate(clk);
+
+ ret = clocksource_mmio_init(system_clock, node->name, freq,
+ 300, 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("%pOFn init clocksource failed: %d", node, ret);
+ goto err_exit;
+ }
+
+ sched_clock_register(gxp_sched_read, 32, freq);
+
+ clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
+ 0xf, 0xffffffff);
+
+ ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
+ node->name, gxp_timer);
+ if (ret) {
+ pr_err("%pOFn request_irq() failed: %d", node, ret);
+ goto err_exit;
+ }
+
+ pr_debug("gxp: system timer (irq = %d)\n", irq);
+ return 0;
+
+err_exit:
+ iounmap(base);
+err_iomap:
+ clk_disable_unprepare(clk);
+err_clk_enable:
+ clk_put(clk);
+err_free:
+ kfree(gxp_timer);
+ return ret;
+}
+
+/*
+ * This probe gets called after the timer is already up and running. This will create
+ * the watchdog device as a child since the registers are shared.
+ */
+
+static int gxp_timer_probe(struct platform_device *pdev)
+{
+ struct platform_device *gxp_watchdog_device;
+ struct device *dev = &pdev->dev;
+
+ if (!gxp_timer) {
+ pr_err("Gxp Timer not initialized, cannot create watchdog");
+ return -ENOMEM;
+ }
+
+ gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
+ if (!gxp_watchdog_device) {
+ pr_err("Timer failed to allocate gxp-wdt");
+ return -ENOMEM;
+ }
+
+ /* Pass the base address (counter) as platform data and nothing else */
+ gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
+ gxp_watchdog_device->dev.parent = dev;
+
+ return platform_device_add(gxp_watchdog_device);
+}
+
+static const struct of_device_id gxp_timer_of_match[] = {
+ { .compatible = "hpe,gxp-timer", },
+ {},
+};
+
+static struct platform_driver gxp_timer_driver = {
+ .probe = gxp_timer_probe,
+ .driver = {
+ .name = "gxp-timer",
+ .of_match_table = gxp_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(gxp_timer_driver);
+
+TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
index cbb184953510..720ed70a2964 100644
--- a/drivers/clocksource/timer-ixp4xx.c
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -19,8 +19,6 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
-/* Goes away with OF conversion */
-#include <linux/platform_data/timer-ixp4xx.h>
/*
* Constants to make it easy to access Timer Control/Status registers
@@ -263,28 +261,6 @@ static struct platform_driver ixp4xx_timer_driver = {
};
builtin_platform_driver(ixp4xx_timer_driver);
-/**
- * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
- * @timerbase: physical base of timer block
- * @timer_irq: Linux IRQ number for the timer
- * @timer_freq: Fixed frequency of the timer
- */
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
- int timer_irq,
- unsigned int timer_freq)
-{
- void __iomem *base;
-
- base = ioremap(timerbase, 0x100);
- if (!base) {
- pr_crit("IXP4xx: can't remap timer\n");
- return;
- }
- ixp4xx_timer_register(base, timer_irq, timer_freq);
-}
-EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
-
-#ifdef CONFIG_OF
static __init int ixp4xx_of_timer_init(struct device_node *np)
{
void __iomem *base;
@@ -315,4 +291,3 @@ out_unmap:
return ret;
}
TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
-#endif
diff --git a/drivers/clocksource/timer-lpc32xx.c b/drivers/clocksource/timer-lpc32xx.c
index d51a62a79ef7..68eae6378bf3 100644
--- a/drivers/clocksource/timer-lpc32xx.c
+++ b/drivers/clocksource/timer-lpc32xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Clocksource driver for NXP LPC32xx/18xx/43xx timer
*
@@ -6,11 +7,6 @@
* Based on:
* time-efm32 Copyright (C) 2013 Pengutronix
* mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index 5101e834d78f..49e86cb70a7a 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Orion SoC timer handling.
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
*/
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index 56c0cc32d0ac..d514b44e67dd 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
}
rps->irq = irq_of_parse_and_map(np, 0);
- if (rps->irq < 0) {
+ if (!rps->irq) {
ret = -EINVAL;
goto err_iomap;
}
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index 69c069e6f0a2..57b2197a0b67 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Pistachio clocksource based on general-purpose timers
*
* Copyright (C) 2015 Imagination Technologies
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 1767f8bf2013..593d5a957b69 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -34,7 +34,7 @@ static int riscv_clock_next_event(unsigned long delta,
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 401d592e85f5..e6a87f4af2b5 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -259,6 +259,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
+ if (initialized) {
+ pr_debug("%pOF: skipping further SP804 timer device\n", np);
+ return 0;
+ }
+
base = of_iomap(np, 0);
if (!base)
return -ENXIO;
@@ -270,11 +275,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
writel(0, timer1_base + timer->ctrl);
writel(0, timer2_base + timer->ctrl);
- if (initialized || !of_device_is_available(np)) {
- ret = -EINVAL;
- goto err;
- }
-
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 0ba8155b8287..bb6ea6c19829 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner A1X SoCs timer handling.
*
@@ -8,10 +9,6 @@
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
* Benn Huang <benn@allwinnertech.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 552c5254390c..85900f7fc69f 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner SoCs hstimer driver.
*
* Copyright (C) 2013 Maxime Ripard
*
* Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index df4a73ea6651..c194e8f74e1d 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -828,8 +828,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
cpu_pm_register_notifier(&timer->nb);
}
- if (pdata)
- timer->errata = pdata->timer_errata;
+ timer->errata = pdata->timer_errata;
timer->pdev = pdev;
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 8eb1f699a857..d4e2ed709bfc 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -854,7 +854,7 @@ int comedi_load_firmware(struct comedi_device *dev,
release_firmware(fw);
}
- return ret < 0 ? ret : 0;
+ return min(ret, 0);
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index f0b6f52eb2c3..ed1ae061a687 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -24,11 +24,9 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/io.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
-
#ifdef DEBUG
static unsigned int freq_debug;
module_param(freq_debug, uint, 0);
@@ -106,8 +104,6 @@ static struct pxa_freqs pxa27x_freqs[] = {
static struct cpufreq_frequency_table
pxa27x_freq_table[NUM_PXA27x_FREQS+1];
-extern unsigned get_clk_frequency_khz(int info);
-
#ifdef CONFIG_REGULATOR
static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 32f993c94675..4afa48d172db 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -8,12 +8,11 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <mach/generic.h>
-#include <mach/pxa3xx-regs.h>
-
#define HSS_104M (0)
#define HSS_156M (1)
#define HSS_208M (2)
@@ -34,6 +33,28 @@
#define DMCFS_26M (0)
#define DMCFS_260M (3)
+#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
+#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
+#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
+#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
+#define ACCR_HSS(x) (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
+#define ACCR_XN(x) (((x) & 0x7) << 8)
+#define ACCR_XL(x) ((x) & 0x1f)
+
struct pxa3xx_freq_info {
unsigned int cpufreq_mhz;
unsigned int core_xl : 5;
@@ -111,41 +132,29 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
static void __update_core_freq(struct pxa3xx_freq_info *info)
{
- uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
- uint32_t accr = ACCR;
- uint32_t xclkcfg;
-
- accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
- accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+ u32 mask, disable, enable, xclkcfg;
+ mask = ACCR_XN_MASK | ACCR_XL_MASK;
+ disable = mask | ACCR_XSPCLK_MASK;
+ enable = ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
/* No clock until core PLL is re-locked */
- accr |= ACCR_XSPCLK(XSPCLK_NONE);
-
+ enable |= ACCR_XSPCLK(XSPCLK_NONE);
xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
- ACCR = accr;
- __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
-
- while ((ACSR & mask) != (accr & mask))
- cpu_relax();
+ pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask);
}
static void __update_bus_freq(struct pxa3xx_freq_info *info)
{
- uint32_t mask;
- uint32_t accr = ACCR;
-
- mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
- ACCR_DMCFS_MASK;
-
- accr &= ~mask;
- accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
- ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+ u32 mask, disable, enable;
- ACCR = accr;
+ mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+ ACCR_DMCFS_MASK;
+ disable = mask;
+ enable = ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+ ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
- while ((ACSR & mask) != (accr & mask))
- cpu_relax();
+ pxa3xx_clk_update_accr(disable, enable, 0, mask);
}
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index f3ec9420215e..2a60d0525cde 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -90,9 +90,12 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
akcipher_req = vc_akcipher_req->akcipher_req;
- if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+ if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+ /* actual length may be less than dst buffer */
+ akcipher_req->dst_len = len - sizeof(vc_req->status);
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+ }
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
@@ -103,54 +106,56 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
uint8_t *pkey;
- unsigned int inlen;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
pkey = kmemdup(key, keylen, GFP_ATOMIC);
if (!pkey)
return -ENOMEM;
- spin_lock(&vcrypto->ctrl_lock);
- memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
- memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
- vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
- sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ ctrl = &vc_ctrl_req->ctrl;
+ memcpy(&ctrl->header, header, sizeof(ctrl->header));
+ memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&key_sg, pkey, keylen);
sgs[num_out++] = &key_sg;
- sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+ sg_init_one(&inhdr_sg, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
- virtqueue_kick(vcrypto->ctrl_vq);
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}
- ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+ ctx->session_id = le64_to_cpu(input->session_id);
ctx->session_valid = true;
err = 0;
out:
- spin_unlock(&vcrypto->ctrl_lock);
+ kfree(vc_ctrl_req);
kfree_sensitive(pkey);
- if (err < 0)
- pr_err("virtio_crypto: Create session failed status: %u\n",
- le32_to_cpu(vcrypto->input.status));
-
return err;
}
@@ -159,37 +164,41 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
- unsigned int num_out = 0, num_in = 0, inlen;
+ unsigned int num_out = 0, num_in = 0;
int err;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
- spin_lock(&vcrypto->ctrl_lock);
- if (!ctx->session_valid) {
- err = 0;
- goto out;
- }
- vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
- vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
- vcrypto->ctrl.header.queue_id = 0;
+ if (!ctx->session_valid)
+ return 0;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+ ctrl->header.queue_id = 0;
- destroy_session = &vcrypto->ctrl.u.destroy_session;
+ destroy_session = &ctrl->u.destroy_session;
destroy_session->session_id = cpu_to_le64(ctx->session_id);
- sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
- sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+ sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &inhdr_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
- virtqueue_kick(vcrypto->ctrl_vq);
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}
@@ -198,11 +207,7 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
ctx->session_valid = false;
out:
- spin_unlock(&vcrypto->ctrl_lock);
- if (err < 0) {
- pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
- vcrypto->ctrl_status.status, destroy_session->session_id);
- }
+ kfree(vc_ctrl_req);
return err;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e693d4ee83a6..59a4c0259456 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -13,6 +13,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
+#include <uapi/linux/virtio_crypto.h>
/* Internal representation of a data virtqueue */
@@ -65,11 +66,6 @@ struct virtio_crypto {
/* Maximum size of per request */
u64 max_size;
- /* Control VQ buffers: protected by the ctrl_lock */
- struct virtio_crypto_op_ctrl_req ctrl;
- struct virtio_crypto_session_input input;
- struct virtio_crypto_inhdr ctrl_status;
-
unsigned long status;
atomic_t ref_count;
struct list_head list;
@@ -85,6 +81,18 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};
+/*
+ * Note: there are padding fields in the request; clear them to zero before
+ * sending to the host to avoid divulging any information.
+ * Ex, virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
+ */
+struct virtio_crypto_ctrl_request {
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+ struct completion compl;
+};
+
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
(struct virtio_crypto_request *vc_req, int len);
@@ -134,5 +142,8 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c6f482db0bc0..1198bd306365 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -22,6 +22,56 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
}
}
+static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ complete(&vc_ctrl_req->compl);
+}
+
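+/*
+ * Control virtqueue interrupt handler: drain completed requests and wake
+ * the waiters blocked in virtio_crypto_ctrl_vq_request().
+ */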
+static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ virtio_crypto_ctrlq_callback(vc_ctrl_req);
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ }
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+}
+
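+/*
+ * Queue a request on the control virtqueue and wait synchronously for the
+ * device to complete it; submission is serialized by ctrl_lock.
+ */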
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ int err;
+ unsigned long flags;
+
+ init_completion(&vc_ctrl_req->compl);
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ return err;
+ }
+
+ virtqueue_kick(vcrypto->ctrl_vq);
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+
+ wait_for_completion(&vc_ctrl_req->compl);
+
+ return 0;
+}
+
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
@@ -73,7 +123,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
goto err_names;
/* Parameters for control virtqueue */
- callbacks[total_vqs - 1] = NULL;
+ callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
names[total_vqs - 1] = "controlq";
/* Allocate/initialize parameters for data virtqueues */
@@ -94,7 +144,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
goto err_engine;
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index a618c46a52b8..e553ccadbcbc 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
int encrypt)
{
struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
- unsigned int tmp;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_sym_create_session_req *sym_create_session;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
/*
* Avoid to do DMA from the stack, switch to using
@@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
if (!cipher_key)
return -ENOMEM;
- spin_lock(&vcrypto->ctrl_lock);
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
/* Pad ctrl header */
- vcrypto->ctrl.header.opcode =
- cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
- vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ ctrl->header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */
- vcrypto->ctrl.header.queue_id = 0;
+ ctrl->header.queue_id = 0;
- vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Pad cipher's parameters */
- vcrypto->ctrl.u.sym_create_session.op_type =
- cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
- vcrypto->ctrl.header.algo;
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
- cpu_to_le32(keylen);
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
- cpu_to_le32(op);
-
- sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sym_create_session = &ctrl->u.sym_create_session;
+ sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ sym_create_session->u.cipher.para.algo = ctrl->header.algo;
+ sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
+ sym_create_session->u.cipher.para.op = cpu_to_le32(op);
+
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Set key */
@@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
sgs[num_out++] = &key_sg;
/* Return status and session id back */
- sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+ sg_init_one(&inhdr, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
- num_in, vcrypto, GFP_ATOMIC);
- if (err < 0) {
- spin_unlock(&vcrypto->ctrl_lock);
- kfree_sensitive(cipher_key);
- return err;
- }
- virtqueue_kick(vcrypto->ctrl_vq);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
- /*
- * Trapping into the hypervisor, so the request should be
- * handled immediately.
- */
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
- spin_unlock(&vcrypto->ctrl_lock);
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
- le32_to_cpu(vcrypto->input.status));
- kfree_sensitive(cipher_key);
- return -EINVAL;
+ le32_to_cpu(input->status));
+ err = -EINVAL;
+ goto out;
}
if (encrypt)
- ctx->enc_sess_info.session_id =
- le64_to_cpu(vcrypto->input.session_id);
+ ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
else
- ctx->dec_sess_info.session_id =
- le64_to_cpu(vcrypto->input.session_id);
-
- spin_unlock(&vcrypto->ctrl_lock);
+ ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
kfree_sensitive(cipher_key);
- return 0;
+ return err;
}
static int virtio_crypto_alg_skcipher_close_session(
@@ -206,60 +197,55 @@ static int virtio_crypto_alg_skcipher_close_session(
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
- unsigned int tmp;
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
- spin_lock(&vcrypto->ctrl_lock);
- vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
/* Pad ctrl header */
- vcrypto->ctrl.header.opcode =
- cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */
- vcrypto->ctrl.header.queue_id = 0;
+ ctrl->header.queue_id = 0;
- destroy_session = &vcrypto->ctrl.u.destroy_session;
+ destroy_session = &ctrl->u.destroy_session;
if (encrypt)
- destroy_session->session_id =
- cpu_to_le64(ctx->enc_sess_info.session_id);
+ destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
else
- destroy_session->session_id =
- cpu_to_le64(ctx->dec_sess_info.session_id);
+ destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
- sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Return status and session id back */
- sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
- sizeof(vcrypto->ctrl_status.status));
+ sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &status_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
- num_in, vcrypto, GFP_ATOMIC);
- if (err < 0) {
- spin_unlock(&vcrypto->ctrl_lock);
- return err;
- }
- virtqueue_kick(vcrypto->ctrl_vq);
-
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
- if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
- spin_unlock(&vcrypto->ctrl_lock);
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
- vcrypto->ctrl_status.status,
- destroy_session->session_id);
+ ctrl_status->status, destroy_session->session_id);
return -EINVAL;
}
- spin_unlock(&vcrypto->ctrl_lock);
- return 0;
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
+ return err;
}
static int virtio_crypto_alg_skcipher_init_sessions(
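The virtio-crypto hunks above drop the old pattern of building requests in the shared vcrypto->ctrl/ctrl_status fields under ctrl_lock and busy-waiting with cpu_relax(), and instead hand a per-request, heap-allocated struct virtio_crypto_ctrl_request to virtio_crypto_ctrl_vq_request(). That helper is not part of this excerpt; the sketch below only illustrates the general pattern it is assumed to follow (per-request buffer used as the virtqueue token plus a completion signalled from the control-queue callback), not the driver's actual implementation.

    #include <linux/completion.h>
    #include <linux/gfp.h>
    #include <linux/scatterlist.h>
    #include <linux/virtio.h>

    /* Illustrative only: names and layout are assumptions, not virtio-crypto's API. */
    struct demo_ctrl_request {
        struct completion done;    /* completed from the ctrl vq callback */
        /* request/response buffers for this one operation would live here */
    };

    static int demo_ctrl_vq_request(struct virtqueue *vq, struct scatterlist *sgs[],
                                    unsigned int out, unsigned int in,
                                    struct demo_ctrl_request *req)
    {
        int err;

        init_completion(&req->done);

        /* the per-request struct doubles as the token returned by virtqueue_get_buf() */
        err = virtqueue_add_sgs(vq, sgs, out, in, req, GFP_KERNEL);
        if (err < 0)
            return err;

        virtqueue_kick(vq);

        /* sleep until the device answers instead of spinning on shared state */
        wait_for_completion(&req->done);
        return 0;
    }

Because each request owns its buffers, the only thing the callers above still have to do is free the request (and any key material) on every exit path, which is what the new out: labels handle.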
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 005a82f671c3..0e5a5662d5a4 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -216,8 +216,11 @@ static int __init dio_init(void)
/* Found a board, allocate it an entry in the list */
dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
- if (!dev)
+ if (!dev) {
+ if (scode >= DIOII_SCBASE)
+ iounmap(va);
return -ENOMEM;
+ }
dev->bus = &dio_bus;
dev->dev.parent = &dio_bus.dev;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 0d42e49105dd..dca7cecb37e3 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -131,6 +131,7 @@ config EXTCON_PALMAS
config EXTCON_PTN5150
tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
depends on I2C && (GPIOLIB || COMPILE_TEST)
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
select REGMAP_I2C
help
Say Y here to enable support for USB peripheral and USB host
@@ -156,7 +157,7 @@ config EXTCON_RT8973A
from abnormal high input voltage (up to 28V).
config EXTCON_SM5502
- tristate "Silicon Mitus SM5502/SM5504 EXTCON support"
+ tristate "Silicon Mitus SM5502/SM5504/SM5703 EXTCON support"
depends on I2C
select IRQ_DOMAIN
select REGMAP_I2C
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 7c6d5857ff25..180be768c215 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -394,8 +394,8 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
put_device(&adev->dev);
- if (!info->id_extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(info->id_extcon))
+ return PTR_ERR(info->id_extcon);
dev_info(dev, "controlling USB role\n");
} else {
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index fb527c23639e..ded1a85a5549 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#define INT3496_GPIO_USB_ID 0
#define INT3496_GPIO_VBUS_EN 1
@@ -30,7 +31,9 @@ struct int3496_data {
struct gpio_desc *gpio_usb_id;
struct gpio_desc *gpio_vbus_en;
struct gpio_desc *gpio_usb_mux;
+ struct regulator *vbus_boost;
int usb_id_irq;
+ bool vbus_boost_enabled;
};
static const unsigned int int3496_cable[] = {
@@ -53,6 +56,27 @@ static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
{ },
};
+static void int3496_set_vbus_boost(struct int3496_data *data, bool enable)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(data->vbus_boost))
+ return;
+
+ if (data->vbus_boost_enabled == enable)
+ return;
+
+ if (enable)
+ ret = regulator_enable(data->vbus_boost);
+ else
+ ret = regulator_disable(data->vbus_boost);
+
+ if (ret == 0)
+ data->vbus_boost_enabled = enable;
+ else
+ dev_err(data->dev, "Error updating Vbus boost regulator: %d\n", ret);
+}
+
static void int3496_do_usb_id(struct work_struct *work)
{
struct int3496_data *data =
@@ -71,6 +95,8 @@ static void int3496_do_usb_id(struct work_struct *work)
if (!IS_ERR(data->gpio_vbus_en))
gpiod_direction_output(data->gpio_vbus_en, !id);
+ else
+ int3496_set_vbus_boost(data, !id);
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
}
@@ -91,10 +117,12 @@ static int int3496_probe(struct platform_device *pdev)
struct int3496_data *data;
int ret;
- ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
- if (ret) {
- dev_err(dev, "can't add GPIO ACPI mapping\n");
- return ret;
+ if (has_acpi_companion(dev)) {
+ ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
+ if (ret) {
+ dev_err(dev, "can't add GPIO ACPI mapping\n");
+ return ret;
+ }
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -106,7 +134,8 @@ static int int3496_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
+ data->gpio_usb_id =
+ devm_gpiod_get(dev, "id", GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(data->gpio_usb_id)) {
ret = PTR_ERR(data->gpio_usb_id);
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
@@ -120,12 +149,14 @@ static int int3496_probe(struct platform_device *pdev)
}
data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
- if (IS_ERR(data->gpio_vbus_en))
- dev_info(dev, "can't request VBUS EN GPIO\n");
+ if (IS_ERR(data->gpio_vbus_en)) {
+ dev_dbg(dev, "can't request VBUS EN GPIO\n");
+ data->vbus_boost = devm_regulator_get_optional(dev, "vbus");
+ }
data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
if (IS_ERR(data->gpio_usb_mux))
- dev_info(dev, "can't request USB MUX GPIO\n");
+ dev_dbg(dev, "can't request USB MUX GPIO\n");
/* register extcon device */
data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
@@ -164,12 +195,19 @@ static const struct acpi_device_id int3496_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
+static const struct platform_device_id int3496_ids[] = {
+ { .name = "intel-int3496" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, int3496_ids);
+
static struct platform_driver int3496_driver = {
.driver = {
.name = "intel-int3496",
.acpi_match_table = int3496_acpi_match,
},
.probe = int3496_probe,
+ .id_table = int3496_ids,
};
module_platform_driver(int3496_driver);
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 5b9a3cf8df26..017a07197f38 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
+#include <linux/usb/role.h>
/* PTN5150 registers */
#define PTN5150_REG_DEVICE_ID 0x01
@@ -52,6 +53,7 @@ struct ptn5150_info {
int irq;
struct work_struct irq_work;
struct mutex mutex;
+ struct usb_role_switch *role_sw;
};
/* List of detectable cables */
@@ -70,6 +72,7 @@ static const struct regmap_config ptn5150_regmap_config = {
static void ptn5150_check_state(struct ptn5150_info *info)
{
unsigned int port_status, reg_data, vbus;
+ enum usb_role usb_role = USB_ROLE_NONE;
int ret;
ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
@@ -85,6 +88,7 @@ static void ptn5150_check_state(struct ptn5150_info *info)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
extcon_set_state_sync(info->edev, EXTCON_USB, true);
+ usb_role = USB_ROLE_DEVICE;
break;
case PTN5150_UFP_ATTACHED:
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -95,10 +99,18 @@ static void ptn5150_check_state(struct ptn5150_info *info)
gpiod_set_value_cansleep(info->vbus_gpiod, 1);
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ usb_role = USB_ROLE_HOST;
break;
default:
break;
}
+
+ if (usb_role) {
+ ret = usb_role_switch_set_role(info->role_sw, usb_role);
+ if (ret)
+ dev_err(info->dev, "failed to set %s role: %d\n",
+ usb_role_string(usb_role), ret);
+ }
}
static void ptn5150_irq_work(struct work_struct *work)
@@ -133,6 +145,13 @@ static void ptn5150_irq_work(struct work_struct *work)
extcon_set_state_sync(info->edev,
EXTCON_USB, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+
+ ret = usb_role_switch_set_role(info->role_sw,
+ USB_ROLE_NONE);
+ if (ret)
+ dev_err(info->dev,
+ "failed to set none role: %d\n",
+ ret);
}
}
@@ -194,6 +213,14 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return 0;
}
+static void ptn5150_work_sync_and_put(void *data)
+{
+ struct ptn5150_info *info = data;
+
+ cancel_work_sync(&info->irq_work);
+ usb_role_switch_put(info->role_sw);
+}
+
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
@@ -284,6 +311,15 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
if (ret)
return -EINVAL;
+ info->role_sw = usb_role_switch_get(info->dev);
+ if (IS_ERR(info->role_sw))
+ return dev_err_probe(info->dev, PTR_ERR(info->role_sw),
+ "failed to get role switch\n");
+
+ ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
+ if (ret)
+ return ret;
+
/*
* Update current extcon state if for example OTG connection was there
* before the probe
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 93da2d8379b1..f706f5288257 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -798,6 +798,7 @@ static const struct sm5502_type sm5504_data = {
static const struct of_device_id sm5502_dt_match[] = {
{ .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data },
{ .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data },
+ { .compatible = "siliconmitus,sm5703-muic", .data = &sm5502_data },
{ },
};
MODULE_DEVICE_TABLE(of, sm5502_dt_match);
@@ -830,6 +831,7 @@ static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
static const struct i2c_device_id sm5502_i2c_id[] = {
{ "sm5502", (kernel_ulong_t)&sm5502_data },
{ "sm5504", (kernel_ulong_t)&sm5504_data },
+ { "sm5703-muic", (kernel_ulong_t)&sm5502_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index f2b65d967384..40d967a11e87 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -226,16 +226,6 @@ static int usb_extcon_suspend(struct device *dev)
}
}
- /*
- * We don't want to process any IRQs after this point
- * as GPIOs used behind I2C subsystem might not be
- * accessible until resume completes. So disable IRQ.
- */
- if (info->id_gpiod)
- disable_irq(info->id_irq);
- if (info->vbus_gpiod)
- disable_irq(info->vbus_irq);
-
if (!device_may_wakeup(dev))
pinctrl_pm_select_sleep_state(dev);
@@ -267,11 +257,6 @@ static int usb_extcon_resume(struct device *dev)
}
}
- if (info->id_gpiod)
- enable_irq(info->id_irq);
- if (info->vbus_gpiod)
- enable_irq(info->vbus_irq);
-
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 5290cc2d19d9..fde1db62be0d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -68,7 +68,7 @@ static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
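struct_size() sizes an allocation that ends in a flexible array member and saturates on overflow, which is why it replaces the open-coded sizeof(*msg) + max(outsize, insize) above. A small, generic illustration (the struct is invented for the example, not the cros_ec one):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_msg {
        u32 len;
        u8 data[];            /* flexible array member */
    };

    static struct demo_msg *demo_alloc(size_t payload)
    {
        /* same as sizeof(struct demo_msg) + payload, but overflow-checked */
        struct demo_msg *msg = kzalloc(struct_size(msg, data, payload), GFP_KERNEL);

        if (msg)
            msg->len = payload;
        return msg;
    }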
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a09e704fd0fa..d3a32b806499 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -399,6 +399,7 @@ static ssize_t cable_state_show(struct device *dev,
/**
* extcon_sync() - Synchronize the state for an external connector.
* @edev: the extcon device
+ * @id: the unique id indicating an external connector
*
* Note that this function send a notification in order to synchronize
* the state and property of an external connector.
@@ -736,6 +737,9 @@ EXPORT_SYMBOL_GPL(extcon_set_property);
/**
* extcon_set_property_sync() - Set property of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
* @prop_val: the pointer including the new value of extcon property
*
* Note that when setting the property value of external connector,
@@ -851,6 +855,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability);
* @extcon_name: the extcon name provided with extcon_dev_register()
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
+ * NOTE: This function returns -EPROBE_DEFER so it may only be called from
+ * probe() functions.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -864,7 +870,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
if (!strcmp(sd->name, extcon_name))
goto out;
}
- sd = NULL;
+ sd = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return sd;
@@ -1218,19 +1224,14 @@ int extcon_dev_register(struct extcon_dev *edev)
edev->dev.type = &edev->extcon_dev_type;
}
- ret = device_register(&edev->dev);
- if (ret) {
- put_device(&edev->dev);
- goto err_dev;
- }
-
spin_lock_init(&edev->lock);
- edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
- sizeof(*edev->nh), GFP_KERNEL);
- if (!edev->nh) {
- ret = -ENOMEM;
- device_unregister(&edev->dev);
- goto err_dev;
+ if (edev->max_supported) {
+ edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+ GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
+ goto err_alloc_nh;
+ }
}
for (index = 0; index < edev->max_supported; index++)
@@ -1241,6 +1242,12 @@ int extcon_dev_register(struct extcon_dev *edev)
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
+ ret = device_register(&edev->dev);
+ if (ret) {
+ put_device(&edev->dev);
+ goto err_dev;
+ }
+
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
@@ -1249,6 +1256,9 @@ int extcon_dev_register(struct extcon_dev *edev)
err_dev:
if (edev->max_supported)
+ kfree(edev->nh);
+err_alloc_nh:
+ if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
if (edev->max_supported && edev->mutually_exclusive) {
@@ -1308,6 +1318,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
+ kfree(edev->nh);
}
put_device(&edev->dev);
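Taken together with the axp288 hunk earlier, the core change means extcon_get_extcon_dev() now reports "provider not registered yet" as ERR_PTR(-EPROBE_DEFER) rather than NULL, so consumers switch from a NULL test to IS_ERR()/PTR_ERR(). A minimal consumer-side sketch of the new contract (the provider name and probe function are placeholders):

    #include <linux/err.h>
    #include <linux/extcon.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
        struct extcon_dev *edev;

        /* "example-extcon" is a placeholder provider name */
        edev = extcon_get_extcon_dev("example-extcon");
        if (IS_ERR(edev))
            return PTR_ERR(edev);    /* typically -EPROBE_DEFER */

        /* ... register notifiers / read state via edev ... */
        return 0;
    }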
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 1be0e8295222..28fcddcd688f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -32,8 +32,7 @@ obj-y += broadcom/
obj-y += cirrus/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
-obj-$(CONFIG_EFI) += efi/
-obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += smccc/
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 3a353776bd34..66727ad3361b 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -604,7 +604,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
"%d-%d", dh->type, entry->instance);
if (*ret) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return;
}
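The dmi-sysfs fix follows the standard kobject rule: once kobject_init_and_add() has been attempted, the object must be dropped with kobject_put() so the ktype's release() runs (and any allocated name is freed); a bare kfree() bypasses release() and leaks. A generic sketch of that error path (types and names are placeholders):

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct demo_entry {
        struct kobject kobj;
        int payload;
    };

    static void demo_release(struct kobject *kobj)
    {
        kfree(container_of(kobj, struct demo_entry, kobj));
    }

    static struct kobj_type demo_ktype = {
        .release   = demo_release,
        .sysfs_ops = &kobj_sysfs_ops,
    };

    static int demo_add(struct kobject *parent)
    {
        struct demo_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        int ret;

        if (!entry)
            return -ENOMEM;

        ret = kobject_init_and_add(&entry->kobj, &demo_ktype, parent, "demo");
        if (ret)
            /* not kfree(entry): dropping the reference lets demo_release() free it */
            kobject_put(&entry->kobj);

        return ret;
    }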
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 69353dd0ea22..5cc238916551 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -685,8 +685,7 @@ static void edd_populate_dir(struct edd_device * edev)
int i;
for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
- if (!attr->test ||
- (attr->test && attr->test(edev)))
+ if (!attr->test || attr->test(edev))
error = sysfs_create_file(&edev->kobj,&attr->attr);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 4720ba98cec3..7aa4717cdcac 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -193,6 +193,9 @@ config EFI_TEST
Say Y here to enable the runtime services support via /dev/efi_test.
If unsure, say N.
+config EFI_DEV_PATH_PARSER
+ bool
+
config APPLE_PROPERTIES
bool "Apple Device Properties"
depends on EFI_STUB && X86
@@ -255,40 +258,15 @@ config EFI_DISABLE_PCI_DMA
options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
may be used to override this option.
-endmenu
-
-config EFI_EMBEDDED_FIRMWARE
- bool
- depends on EFI
- select CRYPTO_LIB_SHA256
-
-config UEFI_CPER
- bool
-
-config UEFI_CPER_ARM
- bool
- depends on UEFI_CPER && ( ARM || ARM64 )
- default y
-
-config UEFI_CPER_X86
- bool
- depends on UEFI_CPER && X86
- default y
-
-config EFI_DEV_PATH_PARSER
- bool
- depends on ACPI
- default n
-
config EFI_EARLYCON
def_bool y
- depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+ depends on SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT
config EFI_CUSTOM_SSDT_OVERLAYS
bool "Load custom ACPI SSDT overlay from an EFI variable"
- depends on EFI && ACPI
+ depends on ACPI
default ACPI_TABLE_UPGRADE
help
Allow loading of an ACPI SSDT overlay from an EFI variable specified
@@ -314,7 +292,6 @@ config EFI_DISABLE_RUNTIME
config EFI_COCO_SECRET
bool "EFI Confidential Computing Secret Area Support"
- depends on EFI
help
Confidential Computing platforms (such as AMD SEV) allow the
Guest Owner to securely inject secrets during guest VM launch.
@@ -327,3 +304,22 @@ config EFI_COCO_SECRET
for usage inside the kernel. This will allow the
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+
+config EFI_EMBEDDED_FIRMWARE
+ bool
+ select CRYPTO_LIB_SHA256
+
+endmenu
+
+config UEFI_CPER
+ bool
+
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && ( ARM || ARM64 )
+ default y
+
+config UEFI_CPER_X86
+ bool
+ depends on UEFI_CPER && X86
+ default y
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index b14e88ccefca..05ae8bcc9d67 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -260,10 +260,10 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
EFI_MEMORY_WB);
if (status != EFI_SUCCESS) {
- efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %d\n",
+ efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %lx\n",
unprotect_start,
unprotect_start + unprotect_size,
- (int)status);
+ status);
}
}
}
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 8177a0fae11d..14663f671323 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -948,17 +948,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
- size_t size = 0;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
- size = pmem->size;
- break;
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
}
- gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
- pmem->vaddr = NULL;
- list_del(&pmem->node);
+ list_del(&svc_data_mem);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
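The stratix10-svc rework above moves gen_pool_free() inside the list walk so the size comes from the matching bookkeeping entry and nothing is touched on a lookup miss. For reference, a generic sketch of that "free by looking up the tracked entry" shape (structure and ownership details are placeholders, not the driver's):

    #include <linux/genalloc.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* placeholder bookkeeping entry, not the driver's struct */
    struct tracked_mem {
        struct list_head node;
        void *vaddr;
        size_t size;
    };

    static void demo_free_tracked(struct gen_pool *pool, struct list_head *list, void *kaddr)
    {
        struct tracked_mem *m;

        list_for_each_entry(m, list, node) {
            if (m->vaddr != kaddr)
                continue;
            gen_pool_free(pool, (unsigned long)kaddr, m->size);
            list_del(&m->node);
            kfree(m);        /* assumes the entry itself was kmalloc'ed */
            return;
        }
        /* nothing to do if the address was never tracked */
    }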
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index f21ece56695e..7977a494a651 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -36,8 +36,16 @@
/* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
#define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU
+/* IOCTL/QUERY feature payload size */
+#define FEATURE_PAYLOAD_SIZE 2
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK GENMASK(15, 0)
+
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
+static u32 query_features[FEATURE_PAYLOAD_SIZE];
static struct platform_device *em_dev;
@@ -167,21 +175,28 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
}
-/**
- * zynqmp_pm_feature() - Check weather given feature is supported or not
- * @api_id: API ID to check
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_feature(const u32 api_id)
+static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
{
int ret;
- u32 ret_payload[PAYLOAD_ARG_CNT];
u64 smc_arg[2];
- struct pm_api_feature_data *feature_data;
- if (!feature_check_enabled)
- return 0;
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ else
+ ret = ret_payload[1];
+
+ return ret;
+}
+
+static int do_feature_check_call(const u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ struct pm_api_feature_data *feature_data;
/* Check for existing entry in hash table for given api */
hash_for_each_possible(pm_api_features_map, feature_data, hentry,
@@ -196,23 +211,86 @@ int zynqmp_pm_feature(const u32 api_id)
return -ENOMEM;
feature_data->pm_api_id = api_id;
- smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
- smc_arg[1] = api_id;
-
- ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
- if (ret)
- ret = -EOPNOTSUPP;
- else
- ret = ret_payload[1];
+ ret = __do_feature_check_call(api_id, ret_payload);
feature_data->feature_status = ret;
hash_add(pm_api_features_map, &feature_data->hentry, api_id);
+ if (api_id == PM_IOCTL)
+ /* Store supported IOCTL IDs mask */
+ memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+ else if (api_id == PM_QUERY_DATA)
+ /* Store supported QUERY IDs mask */
+ memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not and
+ * store supported IOCTL/QUERY ID mask
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_feature(const u32 api_id)
+{
+ int ret;
+
+ if (!feature_check_enabled)
+ return 0;
+
+ ret = do_feature_check_call(api_id);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
+ * is supported or not
+ * @api_id: PM_IOCTL or PM_QUERY_DATA
+ * @id: IOCTL or QUERY function IDs
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ int ret;
+ u32 *bit_mask;
+
+ /* Input arguments validation */
+ if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
+ return -EINVAL;
+
+ /* Check feature check API version */
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret < 0)
+ return ret;
+
+ /* Check if feature check version 2 is supported or not */
+ if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
+ /*
+ * Call feature check for IOCTL/QUERY API to get IOCTL ID or
+ * QUERY ID feature status.
+ */
+ ret = do_feature_check_call(api_id);
+ if (ret < 0)
+ return ret;
+
+ bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
+
+ if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
+ return -EOPNOTSUPP;
+ } else {
+ return -ENODATA;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
+
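The feature payload for PM_IOCTL/PM_QUERY_DATA is two 32-bit words, i.e. a 64-bit bitmap indexed by the sub-function ID, which is why IDs are limited to 64 and the lookup splits into a word index (id / 32) and a bit index (id % 32). A tiny, self-contained illustration of that lookup with made-up mask values:

    #include <stdint.h>
    #include <stdio.h>

    static int id_supported(const uint32_t mask[2], unsigned int id)
    {
        if (id >= 64)
            return 0;
        return (mask[id / 32] >> (id % 32)) & 1;    /* same math as BIT(id % 32) */
    }

    int main(void)
    {
        const uint32_t mask[2] = { 0x00000005, 0x00000002 };    /* IDs 0, 2 and 33 set */

        printf("id 2: %d, id 33: %d, id 40: %d\n",
               id_supported(mask, 2), id_supported(mask, 33), id_supported(mask, 40));
        return 0;
    }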
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -1584,6 +1662,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
struct zynqmp_devinfo *devinfo;
int ret;
+ ret = get_set_conduit_method(dev->of_node);
+ if (ret)
+ return ret;
+
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
@@ -1592,11 +1674,14 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
feature_check_enabled = true;
}
- of_node_put(np);
- ret = get_set_conduit_method(dev->of_node);
- if (ret)
- return ret;
+ if (!feature_check_enabled) {
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret >= 0)
+ feature_check_enabled = true;
+ }
+
+ of_node_put(np);
devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
if (!devinfo)
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 0bff783d1b61..5935b3d0abd5 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -18,9 +18,9 @@ obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
-obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
# FPGA Bridge Drivers
obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 717ac9715970..fd1fa55c9113 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -259,6 +259,15 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
*/
bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ if (bar == FME_PORT_OFST_BAR_SKIP) {
+ continue;
+ } else if (bar >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
+ bar, i);
+ ret = -EINVAL;
+ break;
+ }
+
start = pci_resource_start(pcidev, bar) + offset;
len = pci_resource_len(pcidev, bar) - offset;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 599bb21d86af..6bff39ff21a0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -940,9 +940,12 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
{
void __iomem *base = binfo->ioaddr + ofst;
unsigned int i, ibase, inr = 0;
+ enum dfl_id_type type;
int virq;
u64 v;
+ type = feature_dev_id_type(binfo->feature_dev);
+
/*
* Ideally DFL framework should only read info from DFL header, but
* current version DFL only provides mmio resources information for
@@ -957,22 +960,25 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* code will be added. But in order to be compatible to old version
* DFL, the driver may still fall back to these quirks.
*/
- switch (fid) {
- case PORT_FEATURE_ID_UINT:
- v = readq(base + PORT_UINT_CAP);
- ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
- inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
- break;
- case PORT_FEATURE_ID_ERROR:
- v = readq(base + PORT_ERROR_CAP);
- ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
- break;
- case FME_FEATURE_ID_GLOBAL_ERR:
- v = readq(base + FME_ERROR_CAP);
- ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
- break;
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ }
}
if (!inr) {
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 53572c7aced0..06cfcd5e84bb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -89,6 +89,7 @@
#define FME_HDR_NEXT_AFU NEXT_AFU
#define FME_HDR_CAP 0x30
#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_PORT_OFST_BAR_SKIP 7
#define FME_HDR_BITSTREAM_ID 0x60
#define FME_HDR_BITSTREAM_MD 0x68
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index d49a9ce34568..a3595ecc3f79 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -148,11 +148,12 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
int ret;
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- if (!mgr->mops->initial_header_size)
+ if (!mgr->mops->initial_header_size) {
ret = fpga_mgr_write_init(mgr, info, NULL, 0);
- else
- ret = fpga_mgr_write_init(
- mgr, info, buf, min(mgr->mops->initial_header_size, count));
+ } else {
+ count = min(mgr->mops->initial_header_size, count);
+ ret = fpga_mgr_write_init(mgr, info, buf, count);
+ }
if (ret) {
dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
@@ -730,6 +731,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
*
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
* This is the devres variant of fpga_mgr_register_full() for which the unregister
* function will be called automatically when the managing device is detached.
*/
@@ -763,6 +766,8 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
* This is the devres variant of fpga_mgr_register() for which the
* unregister function will be called automatically when the managing
* device is detached.
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index b0ac18de4885..485948e3c0db 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -18,9 +18,9 @@
static DEFINE_IDA(fpga_region_ida);
static struct class *fpga_region_class;
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *))
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *))
{
struct device *dev;
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 50b83057c048..ae82532fc127 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -28,7 +28,7 @@ MODULE_DEVICE_TABLE(of, fpga_region_of_match);
*
* Caller will need to put_device(&region->dev) when done.
*
- * Returns FPGA Region struct or NULL
+ * Return: FPGA Region struct or NULL
*/
static struct fpga_region *of_fpga_region_find(struct device_node *np)
{
@@ -80,7 +80,7 @@ static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
* Caller should call fpga_bridges_put(&region->bridge_list) when
* done with the bridges.
*
- * Return 0 for success (even if there are no bridges specified)
+ * Return: 0 for success (even if there are no bridges specified)
* or -EBUSY if any of the bridges are in use.
*/
static int of_fpga_region_get_bridges(struct fpga_region *region)
@@ -139,13 +139,13 @@ static int of_fpga_region_get_bridges(struct fpga_region *region)
}
/**
- * child_regions_with_firmware
+ * child_regions_with_firmware - Used to check the child region info.
* @overlay: device node of the overlay
*
* If the overlay adds child FPGA regions, they are not allowed to have
* firmware-name property.
*
- * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ * Return: 0 for OK or -EINVAL if child FPGA region adds firmware-name.
*/
static int child_regions_with_firmware(struct device_node *overlay)
{
@@ -184,14 +184,14 @@ static int child_regions_with_firmware(struct device_node *overlay)
* Given an overlay applied to an FPGA region, parse the FPGA image specific
* info in the overlay and do some checking.
*
- * Returns:
+ * Return:
* NULL if overlay doesn't direct us to program the FPGA.
* fpga_image_info struct if there is an image to program.
* error code for invalid overlay.
*/
-static struct fpga_image_info *of_fpga_region_parse_ov(
- struct fpga_region *region,
- struct device_node *overlay)
+static struct fpga_image_info *
+of_fpga_region_parse_ov(struct fpga_region *region,
+ struct device_node *overlay)
{
struct device *dev = &region->dev;
struct fpga_image_info *info;
@@ -279,7 +279,7 @@ ret_no_info:
* If the checks fail, overlay is rejected and does not get added to the
* live tree.
*
- * Returns 0 for success or negative error code for failure.
+ * Return: 0 for success or negative error code for failure.
*/
static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
struct of_overlay_notify_data *nd)
@@ -339,7 +339,7 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region,
* This notifier handles programming an FPGA when a "firmware-name" property is
* added to an fpga-region.
*
- * Returns NOTIFY_OK or error if FPGA programming fails.
+ * Return: NOTIFY_OK or error if FPGA programming fails.
*/
static int of_fpga_region_notify(struct notifier_block *nb,
unsigned long action, void *arg)
@@ -446,6 +446,8 @@ static struct platform_driver of_fpga_region_driver = {
/**
* of_fpga_region_init - init function for fpga_region class
* Creates the fpga_region class and registers a reconfig notifier.
+ *
+ * Return: 0 on success, negative error code otherwise.
*/
static int __init of_fpga_region_init(void)
{
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index f1e4ac90e7d3..e388e75103f4 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -406,12 +406,6 @@ static int adp5588_gpio_probe(struct i2c_client *client)
if (ret)
return ret;
- if (pdata && pdata->setup) {
- ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
- if (ret < 0)
- dev_warn(&client->dev, "setup failed, %d\n", ret);
- }
-
i2c_set_clientdata(client, dev);
return 0;
@@ -419,20 +413,7 @@ static int adp5588_gpio_probe(struct i2c_client *client)
static int adp5588_gpio_remove(struct i2c_client *client)
{
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct adp5588_gpio *dev = i2c_get_clientdata(client);
- int ret;
-
- if (pdata && pdata->teardown) {
- ret = pdata->teardown(client,
- dev->gpio_chip.base, dev->gpio_chip.ngpio,
- pdata->context);
- if (ret < 0) {
- dev_err(&client->dev, "teardown failed %d\n", ret);
- return ret;
- }
- }
if (dev->client->irq)
free_irq(dev->client->irq, dev);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index b444c6ab958b..08bc52c3cdcb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1120,20 +1120,21 @@ static int pca953x_regcache_sync(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
+ u8 regaddr;
/*
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1141,16 +1142,18 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
- PCAL953X_IN_LATCH + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
- PCAL953X_INT_MASK + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 84c4f1e9fb0c..de28a68daea0 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2017 NVIDIA Corporation
+ * Copyright (c) 2016-2022 NVIDIA Corporation
*
* Author: Thierry Reding <treding@nvidia.com>
+ * Dipen Patel <dpatel@nvidia.com>
*/
#include <linux/gpio/driver.h>
@@ -11,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/hte.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
@@ -36,6 +38,7 @@
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
#define TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5)
#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC BIT(7)
#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
#define TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff)
@@ -76,6 +79,7 @@ struct tegra_gpio_soc {
const struct tegra186_pin_range *pin_ranges;
unsigned int num_pin_ranges;
const char *pinmux;
+ bool has_gte;
};
struct tegra_gpio {
@@ -193,6 +197,76 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+#define HTE_BOTH_EDGES (HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS)
+
+static int tegra186_gpio_en_hw_ts(struct gpio_chip *gc, u32 offset,
+ unsigned long flags)
+{
+ struct tegra_gpio *gpio;
+ void __iomem *base;
+ int value;
+
+ if (!gc)
+ return -EINVAL;
+
+ gpio = gpiochip_get_data(gc);
+ if (!gpio)
+ return -ENODEV;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+
+ if (flags == HTE_BOTH_EDGES) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ } else if (flags == HTE_RISING_EDGE_TS) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ } else if (flags == HTE_FALLING_EDGE_TS) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ }
+
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_dis_hw_ts(struct gpio_chip *gc, u32 offset,
+ unsigned long flags)
+{
+ struct tegra_gpio *gpio;
+ void __iomem *base;
+ int value;
+
+ if (!gc)
+ return -EINVAL;
+
+ gpio = gpiochip_get_data(gc);
+ if (!gpio)
+ return -ENODEV;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+ if (flags == HTE_BOTH_EDGES) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ } else if (flags == HTE_RISING_EDGE_TS) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ } else if (flags == HTE_FALLING_EDGE_TS) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ }
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
@@ -747,6 +821,10 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
+ if (gpio->soc->has_gte) {
+ gpio->gpio.en_hw_timestamp = tegra186_gpio_en_hw_ts;
+ gpio->gpio.dis_hw_timestamp = tegra186_gpio_dis_hw_ts;
+ }
gpio->gpio.base = -1;
@@ -991,6 +1069,7 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
.name = "tegra194-gpio-aon",
.instance = 1,
.num_irqs_per_bank = 8,
+ .has_gte = true,
};
#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c2900b1be69d..f5aa5f93342a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -24,6 +24,7 @@
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/hte.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -464,6 +465,25 @@ struct line {
* stale value.
*/
unsigned int level;
+ /*
+ * -- hte specific fields --
+ */
+ struct hte_ts_desc hdesc;
+ /*
+ * The HTE provider sets the line level at the time of the event. The
+ * valid value is 0 or 1, or negative on error.

+ */
+ int raw_level;
+ /*
+ * When sw_debounce is set on an HTE-enabled line, this is a running
+ * counter of the discarded events.
+ */
+ u32 total_discard_seq;
+ /*
+ * When sw_debounce is set on an HTE-enabled line, this variable records
+ * the last sequence number before the debounce period expires.
+ */
+ u32 last_seqno;
};
/**
@@ -518,6 +538,7 @@ struct linereq {
GPIO_V2_LINE_DRIVE_FLAGS | \
GPIO_V2_LINE_EDGE_FLAGS | \
GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
static void linereq_put_event(struct linereq *lr,
@@ -542,10 +563,98 @@ static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ return line->timestamp_ns;
return ktime_get_ns();
}
+static enum hte_return process_hw_ts_thread(void *p)
+{
+ struct line *line;
+ struct linereq *lr;
+ struct gpio_v2_line_event le;
+ int level;
+ u64 eflags;
+
+ if (!p)
+ return HTE_CB_HANDLED;
+
+ line = p;
+ lr = line->req;
+
+ memset(&le, 0, sizeof(le));
+
+ le.timestamp_ns = line->timestamp_ns;
+ eflags = READ_ONCE(line->eflags);
+
+ if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
+ if (line->raw_level >= 0) {
+ if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ level = !line->raw_level;
+ else
+ level = line->raw_level;
+ } else {
+ level = gpiod_get_value_cansleep(line->desc);
+ }
+
+ if (level)
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ else
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+ /* Emit low-to-high event */
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+ /* Emit high-to-low event */
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else {
+ return HTE_CB_HANDLED;
+ }
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
+ le.offset = gpio_chip_hwgpio(line->desc);
+
+ linereq_put_event(lr, &le);
+
+ return HTE_CB_HANDLED;
+}
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+ struct line *line;
+ struct linereq *lr;
+ int diff_seqno = 0;
+
+ if (!ts || !p)
+ return HTE_CB_HANDLED;
+
+ line = p;
+ line->timestamp_ns = ts->tsc;
+ line->raw_level = ts->raw_level;
+ lr = line->req;
+
+ if (READ_ONCE(line->sw_debounced)) {
+ line->total_discard_seq++;
+ line->last_seqno = ts->seq;
+ mod_delayed_work(system_wq, &line->work,
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+ } else {
+ if (unlikely(ts->seq < line->line_seqno))
+ return HTE_CB_HANDLED;
+
+ diff_seqno = ts->seq - line->line_seqno;
+ line->line_seqno = ts->seq;
+ if (lr->num_lines != 1)
+ line->req_seqno = atomic_add_return(diff_seqno,
+ &lr->seqno);
+
+ return HTE_RUN_SECOND_CB;
+ }
+
+ return HTE_CB_HANDLED;
+}
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
@@ -651,10 +760,16 @@ static void debounce_work_func(struct work_struct *work)
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
- int level;
+ int level, diff_seqno;
u64 eflags;
- level = gpiod_get_raw_value_cansleep(line->desc);
+ if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ level = line->raw_level;
+ if (level < 0)
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ } else {
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ }
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
@@ -685,10 +800,21 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
- line->line_seqno++;
- le.line_seqno = line->line_seqno;
- le.seqno = (lr->num_lines == 1) ?
- le.line_seqno : atomic_inc_return(&lr->seqno);
+ if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ /* discard events except the last one */
+ line->total_discard_seq -= 1;
+ diff_seqno = line->last_seqno - line->total_discard_seq -
+ line->line_seqno;
+ line->line_seqno = line->last_seqno - line->total_discard_seq;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ?
+ le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
+ } else {
+ line->line_seqno++;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ?
+ le.line_seqno : atomic_inc_return(&lr->seqno);
+ }
if (level)
/* Emit low-to-high event */
@@ -700,8 +826,34 @@ static void debounce_work_func(struct work_struct *work)
linereq_put_event(lr, &le);
}
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ int ret;
+ unsigned long flags = 0;
+ struct hte_ts_desc *hdesc = &line->hdesc;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
+
+ line->total_discard_seq = 0;
+
+ hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
+ NULL, line->desc);
+
+ ret = hte_ts_get(NULL, hdesc, 0);
+ if (ret)
+ return ret;
+
+ return hte_request_ts_ns(hdesc, process_hw_ts,
+ process_hw_ts_thread, line);
+}
+
static int debounce_setup(struct line *line,
- unsigned int debounce_period_us)
+ unsigned int debounce_period_us, bool hte_req)
{
unsigned long irqflags;
int ret, level, irq;
@@ -721,19 +873,27 @@ static int debounce_setup(struct line *line,
if (level < 0)
return level;
- irq = gpiod_to_irq(line->desc);
- if (irq < 0)
- return -ENXIO;
+ if (!hte_req) {
+ irq = gpiod_to_irq(line->desc);
+ if (irq < 0)
+ return -ENXIO;
- WRITE_ONCE(line->level, level);
- irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
- ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
- return ret;
+ irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ ret = request_irq(irq, debounce_irq_handler, irqflags,
+ line->req->label, line);
+ if (ret)
+ return ret;
+ line->irq = irq;
+ } else {
+ ret = hte_edge_setup(line,
+ GPIO_V2_LINE_FLAG_EDGE_RISING |
+ GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ if (ret)
+ return ret;
+ }
+ WRITE_ONCE(line->level, level);
WRITE_ONCE(line->sw_debounced, 1);
- line->irq = irq;
}
return 0;
}
@@ -766,13 +926,16 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}
-static void edge_detector_stop(struct line *line)
+static void edge_detector_stop(struct line *line, bool hte_en)
{
- if (line->irq) {
+ if (line->irq && !hte_en) {
free_irq(line->irq, line);
line->irq = 0;
}
+ if (hte_en)
+ hte_ts_put(&line->hdesc);
+
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
@@ -784,7 +947,7 @@ static void edge_detector_stop(struct line *line)
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx,
- u64 eflags)
+ u64 eflags, bool hte_req)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
@@ -799,7 +962,7 @@ static int edge_detector_setup(struct line *line,
WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
- ret = debounce_setup(line, debounce_period_us);
+ ret = debounce_setup(line, debounce_period_us, hte_req);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -809,6 +972,9 @@ static int edge_detector_setup(struct line *line,
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
+ if (hte_req)
+ return hte_edge_setup(line, eflags);
+
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -834,13 +1000,18 @@ static int edge_detector_setup(struct line *line,
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx,
- u64 eflags, bool polarity_change)
+ u64 flags, bool polarity_change,
+ bool prev_hte_flag)
{
+ u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
unsigned int debounce_period_us =
- gpio_v2_line_config_debounce_period(lc, line_idx);
+ gpio_v2_line_config_debounce_period(lc, line_idx);
+ bool hte_change = (prev_hte_flag !=
+ ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
+ && !hte_change)
return 0;
/* sw debounced and still will be...*/
@@ -851,11 +1022,12 @@ static int edge_detector_update(struct line *line,
}
/* reconfiguring edge detection or sw debounce being disabled */
- if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
- edge_detector_stop(line);
+ edge_detector_stop(line, prev_hte_flag);
- return edge_detector_setup(line, lc, line_idx, eflags);
+ return edge_detector_setup(line, lc, line_idx, eflags,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -891,7 +1063,6 @@ static int gpio_v2_line_flags_validate(u64 flags)
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
-
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
@@ -900,6 +1071,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
(flags & GPIO_V2_LINE_FLAG_OUTPUT))
return -EINVAL;
+ /* Only allow one event clock source */
+ if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return -EINVAL;
+
/* Edge detection requires explicit input. */
if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
!(flags & GPIO_V2_LINE_FLAG_INPUT))
@@ -992,6 +1168,8 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+ assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
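From userspace, the new clock is selected with GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE in a v2 line request; the validation added above rejects combining it with the REALTIME clock, and each edge event read from the request fd then carries the hardware timestamp in timestamp_ns. A hedged usage sketch against the character-device uAPI (it assumes headers that already contain the new flag; chip path and line offset are placeholders, and error handling is trimmed):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/gpio.h>

    int main(void)
    {
        struct gpio_v2_line_request req;
        struct gpio_v2_line_event ev;
        int chip = open("/dev/gpiochip0", O_RDWR);    /* placeholder chip */

        memset(&req, 0, sizeof(req));
        req.num_lines = 1;
        req.offsets[0] = 11;                          /* placeholder line offset */
        strcpy(req.consumer, "hte-demo");
        req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
                           GPIO_V2_LINE_FLAG_EDGE_RISING |
                           GPIO_V2_LINE_FLAG_EDGE_FALLING |
                           GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

        if (chip < 0 || ioctl(chip, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
            return 1;

        /* each read() returns one edge event stamped by the HTE provider */
        if (read(req.fd, &ev, sizeof(ev)) == sizeof(ev)) {
            /* ev.timestamp_ns now carries the hardware timestamp */
        }

        close(req.fd);
        close(chip);
        return 0;
    }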
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1121,6 +1299,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
unsigned int i;
u64 flags;
bool polarity_change;
+ bool prev_hte_flag;
int ret;
for (i = 0; i < lr->num_lines; i++) {
@@ -1130,6 +1309,8 @@ static long linereq_set_config_unlocked(struct linereq *lr,
(!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
+ prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
+
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
/*
* Lines have to be requested explicitly for input
@@ -1138,7 +1319,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- edge_detector_stop(&lr->lines[i]);
+ edge_detector_stop(&lr->lines[i], prev_hte_flag);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
@@ -1148,8 +1329,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
return ret;
ret = edge_detector_update(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS,
- polarity_change);
+ flags, polarity_change, prev_hte_flag);
if (ret)
return ret;
}
@@ -1280,9 +1460,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
+ bool hte;
for (i = 0; i < lr->num_lines; i++) {
- edge_detector_stop(&lr->lines[i]);
+ hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
+ &lr->lines[i].desc->flags);
+ edge_detector_stop(&lr->lines[i], hte);
if (lr->lines[i].desc)
gpiod_free(lr->lines[i].desc);
}
@@ -1408,7 +1591,8 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS);
+ flags & GPIO_V2_LINE_EDGE_FLAGS,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
if (ret)
goto out_free_linereq;
}
@@ -1961,6 +2145,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
if (debounce_period_us) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9fff4f464ca3..9535f48e18d1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2454,6 +2454,64 @@ set_output_flag:
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
+ * gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
+ *
+ * @desc: GPIO to enable.
+ * @flags: Flags related to GPIO edge.
+ *
+ * Return 0 in case of success, else negative error code.
+ */
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+ int ret = 0;
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+
+ gc = desc->gdev->chip;
+ if (!gc->en_hw_timestamp) {
+ gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ ret = gc->en_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ if (ret)
+ gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
+
+/**
+ * gpiod_disable_hw_timestamp_ns - Disable hardware timestamp.
+ *
+ * @desc: GPIO to disable.
+ * @flags: Flags related to GPIO edge, same value as used during enable call.
+ *
+ * Return 0 in case of success, else negative error code.
+ */
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+ int ret = 0;
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+
+ gc = desc->gdev->chip;
+ if (!gc->dis_hw_timestamp) {
+ gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ ret = gc->dis_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ if (ret)
+ gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+
+/**
* gpiod_set_config - sets @config for a GPIO
* @desc: descriptor of the GPIO for which to set the configuration
* @config: Same packed config format as generic pinconf
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index eef3ec073d9e..d900ecdbac46 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -161,6 +161,7 @@ struct gpio_desc {
#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
const char *label;
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 835c88318cec..8997f0096545 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -2,7 +2,6 @@
# drm/tegra depends on host1x, so if both drivers are built-in care must be
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
-obj-$(CONFIG_TEGRA_HOST1X) += host1x/
-obj-y += drm/ vga/
+obj-y += host1x/ drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8b5452a8d330..67abf8dcd30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1621,7 +1621,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mutex_lock(&mem->lock);
- /* Unpin MMIO/DOORBELL BO's that were pinnned during allocation */
+ /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
if (mem->alloc_flags &
(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 63e0293edc5f..fd8f3731758e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -188,13 +188,17 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
vram_type = AMDGPU_VRAM_TYPE_DDR3;
break;
case Ddr4MemType:
- case LpDdr4MemType:
vram_type = AMDGPU_VRAM_TYPE_DDR4;
break;
+ case LpDdr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
+ break;
case Ddr5MemType:
- case LpDdr5MemType:
vram_type = AMDGPU_VRAM_TYPE_DDR5;
break;
+ case LpDdr5MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e552a2004868..b28af04b0c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
@@ -1252,7 +1252,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p->fence = dma_fence_get(&job->base.s_fence->finished);
- amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
+ seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
amdgpu_cs_post_dependencies(p);
if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c317078d1afd..7dc92ef36b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -135,9 +135,9 @@ static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
- struct amdgpu_device *adev = ctx->adev;
- int32_t ctx_prio;
+ struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
+ int32_t ctx_prio;
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
@@ -162,17 +162,50 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
return hw_prio;
}
+/* Calculate the time spent on the hw */
+static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
+{
+ struct drm_sched_fence *s_fence;
+
+ if (!fence)
+ return ns_to_ktime(0);
+
+ /* When the fence is not even scheduled, it can't have spent time */
+ s_fence = to_drm_sched_fence(fence);
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
+ return ns_to_ktime(0);
+
+ /* When it is still running, account how much time was already spent */
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
+ return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
+
+ return ktime_sub(s_fence->finished.timestamp,
+ s_fence->scheduled.timestamp);
+}
+
+static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *centity)
+{
+ ktime_t res = ns_to_ktime(0);
+ uint32_t i;
+
+ spin_lock(&ctx->ring_lock);
+ for (i = 0; i < amdgpu_sched_jobs; i++) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
+ }
+ spin_unlock(&ctx->ring_lock);
+ return res;
+}
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
const u32 ring)
{
- struct amdgpu_device *adev = ctx->adev;
- struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
- unsigned num_scheds = 0;
- int32_t ctx_prio;
- unsigned int hw_prio;
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ struct amdgpu_ctx_entity *entity;
enum drm_sched_priority drm_prio;
+ unsigned int hw_prio, num_scheds;
+ int32_t ctx_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -182,6 +215,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
+ entity->hw_ip = hw_ip;
entity->sequence = 1;
hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
@@ -220,10 +254,25 @@ error_free_entity:
return r;
}
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
- int32_t priority,
- struct drm_file *filp,
- struct amdgpu_ctx *ctx)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+ ktime_t res = ns_to_ktime(0);
+ int i;
+
+ if (!entity)
+ return res;
+
+ for (i = 0; i < amdgpu_sched_jobs; ++i) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+ dma_fence_put(entity->fences[i]);
+ }
+
+ kfree(entity);
+ return res;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+ struct drm_file *filp, struct amdgpu_ctx *ctx)
{
int r;
@@ -233,15 +282,14 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
-
kref_init(&ctx->refcount);
+ ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
- ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
- ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
@@ -249,24 +297,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
-{
-
- int i;
-
- if (!entity)
- return;
-
- for (i = 0; i < amdgpu_sched_jobs; ++i)
- dma_fence_put(entity->fences[i]);
-
- kfree(entity);
-}
-
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
u32 *stable_pstate)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level current_level;
current_level = amdgpu_dpm_get_performance_level(adev);
@@ -294,7 +328,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
u32 stable_pstate)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level level;
u32 current_stable_pstate;
int r;
@@ -345,7 +379,8 @@ done:
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+ struct amdgpu_device *adev = mgr->adev;
unsigned i, j, idx;
if (!adev)
@@ -353,8 +388,10 @@ static void amdgpu_ctx_fini(struct kref *ref)
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
- amdgpu_ctx_fini_entity(ctx->entities[i][j]);
- ctx->entities[i][j] = NULL;
+ ktime_t spend;
+
+ spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
}
}
@@ -421,7 +458,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
}
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, priority, filp, ctx);
+ r = amdgpu_ctx_init(mgr, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -671,9 +708,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
- struct drm_sched_entity *entity,
- struct dma_fence *fence, uint64_t *handle)
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ struct dma_fence *fence)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
uint64_t seq = centity->sequence;
@@ -682,8 +719,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
idx = seq & (amdgpu_sched_jobs - 1);
other = centity->fences[idx];
- if (other)
- BUG_ON(!dma_fence_is_signaled(other));
+ WARN_ON(other && !dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -692,9 +728,11 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
centity->sequence++;
spin_unlock(&ctx->ring_lock);
+ atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
+ &ctx->mgr->time_spend[centity->hw_ip]);
+
dma_fence_put(other);
- if (handle)
- *handle = seq;
+ return seq;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -731,7 +769,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
int hw_ip,
int32_t priority)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
@@ -796,10 +834,17 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
return r;
}
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev)
{
+ unsigned int i;
+
+ mgr->adev = adev;
mutex_init(&mgr->lock);
idr_init(&mgr->ctx_handles);
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ atomic64_set(&mgr->time_spend[i], 0);
}
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
@@ -875,80 +920,38 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
-static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx,
- struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max)
-{
- ktime_t now, t1;
- uint32_t i;
-
- *total = *max = 0;
-
- now = ktime_get();
- for (i = 0; i < amdgpu_sched_jobs; i++) {
- struct dma_fence *fence;
- struct drm_sched_fence *s_fence;
-
- spin_lock(&ctx->ring_lock);
- fence = dma_fence_get(centity->fences[i]);
- spin_unlock(&ctx->ring_lock);
- if (!fence)
- continue;
- s_fence = to_drm_sched_fence(fence);
- if (!dma_fence_is_signaled(&s_fence->scheduled)) {
- dma_fence_put(fence);
- continue;
- }
- t1 = s_fence->scheduled.timestamp;
- if (!ktime_before(t1, now)) {
- dma_fence_put(fence);
- continue;
- }
- if (dma_fence_is_signaled(&s_fence->finished) &&
- s_fence->finished.timestamp < now)
- *total += ktime_sub(s_fence->finished.timestamp, t1);
- else
- *total += ktime_sub(now, t1);
- t1 = ktime_sub(now, t1);
- dma_fence_put(fence);
- *max = max(t1, *max);
- }
-}
-
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
- uint32_t idx, uint64_t *elapsed)
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM])
{
- struct idr *idp;
struct amdgpu_ctx *ctx;
+ unsigned int hw_ip, i;
uint32_t id;
- struct amdgpu_ctx_entity *centity;
- ktime_t total = 0, max = 0;
- if (idx >= AMDGPU_MAX_ENTITY_NUM)
- return 0;
- idp = &mgr->ctx_handles;
+ /*
+ * This is a little bit racy because a ctx or a fence can be destroyed
+ * just at the moment we try to account it. But that is ok since exactly
+ * that case is explicitly allowed by the interface.
+ */
mutex_lock(&mgr->lock);
- idr_for_each_entry(idp, ctx, id) {
- ktime_t ttotal, tmax;
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
- if (!ctx->entities[hwip][idx])
- continue;
-
- centity = ctx->entities[hwip][idx];
- amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax);
+ usage[hw_ip] = ns_to_ktime(ns);
+ }
- /* Harmonic mean approximation diverges for very small
- * values. If ratio < 0.01% ignore
- */
- if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal))
- continue;
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
+ struct amdgpu_ctx_entity *centity;
+ ktime_t spend;
- total = ktime_add(total, ttotal);
- max = ktime_after(tmax, max) ? tmax : max;
+ centity = ctx->entities[hw_ip][i];
+ if (!centity)
+ continue;
+ spend = amdgpu_ctx_entity_time(ctx, centity);
+ usage[hw_ip] = ktime_add(usage[hw_ip], spend);
+ }
+ }
}
-
mutex_unlock(&mgr->lock);
- if (elapsed)
- *elapsed = max;
-
- return total;
}
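The accounting scheme introduced in this file keeps a per-hw_ip atomic accumulator in the context manager: the runtime of retired fences is folded in when a fence slot is reused or an entity is torn down, and the query adds the elapsed time of fences still in flight on top. The following is a minimal, self-contained model of that pattern in plain C; the engine count, names and helper functions are assumptions for illustration, not amdgpu code.

/*
 * Simplified model of the scheme above: finished work is folded into an
 * atomic per-engine accumulator, and a query adds the elapsed time of
 * jobs still running on top of it.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

#define NUM_ENGINES 4

static _Atomic uint64_t time_spend_ns[NUM_ENGINES];

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Called when a job retires: fold its runtime into the accumulator. */
static void job_retired(unsigned int engine, uint64_t start_ns, uint64_t end_ns)
{
	atomic_fetch_add(&time_spend_ns[engine], end_ns - start_ns);
}

/* Called when usage is queried: accumulator plus still-running jobs. */
static uint64_t engine_usage_ns(unsigned int engine,
				const uint64_t *running_start_ns, size_t n)
{
	uint64_t total = atomic_load(&time_spend_ns[engine]);
	size_t i;

	for (i = 0; i < n; i++)
		total += now_ns() - running_start_ns[i];
	return total;
}

As in the kernel code, a small amount of raciness is tolerated: a job that retires while the query runs may be counted from either side, which only perturbs the reading momentarily.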
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 142f2f87d44c..cc7c8afff414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -23,16 +23,20 @@
#ifndef __AMDGPU_CTX_H__
#define __AMDGPU_CTX_H__
+#include <linux/ktime.h>
+#include <linux/types.h>
+
#include "amdgpu_ring.h"
struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
+struct amdgpu_ctx_mgr;
#define AMDGPU_MAX_ENTITY_NUM 4
-#define AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max, total) ((max) > 16384ULL*(total))
struct amdgpu_ctx_entity {
+ uint32_t hw_ip;
uint64_t sequence;
struct drm_sched_entity entity;
struct dma_fence *fences[];
@@ -40,7 +44,7 @@ struct amdgpu_ctx_entity {
struct amdgpu_ctx {
struct kref refcount;
- struct amdgpu_device *adev;
+ struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
unsigned reset_counter_query;
uint32_t vram_lost_counter;
@@ -61,6 +65,7 @@ struct amdgpu_ctx_mgr {
struct mutex lock;
/* protected by lock */
struct idr ctx_handles;
+ atomic64_t time_spend[AMDGPU_HW_IP_NUM];
};
extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
@@ -70,9 +75,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity);
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
- struct drm_sched_entity *entity,
- struct dma_fence *fence, uint64_t *seq);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
uint64_t seq);
@@ -85,10 +90,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity);
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
- uint32_t idx, uint64_t *elapsed);
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM]);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9af8d7a1d011..625424f3082b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1556,9 +1556,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_gmc_tmz_set(adev);
-
-
return 0;
}
@@ -3701,6 +3698,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Enable TMZ based on IP_VERSION */
+ amdgpu_gmc_tmz_set(adev);
+
amdgpu_gmc_noretry_set(adev);
/* Need to get xgmi info early to decide the reset behavior*/
if (adev->gmc.xgmi.supported) {
@@ -5219,6 +5219,10 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)
adev->asic_reset_res = r;
+
+ /* Aldebaran supports RAS in SRIOV, so RAS needs to be resumed during reset */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
if (r && r == -EAGAIN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 881570dced41..47f0344205ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1130,13 +1130,24 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
ip->revision & 0xc0;
ip->revision &= ~0xc0;
- adev->vcn.num_vcn_inst++;
+ if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
+ adev->vcn.num_vcn_inst++;
+ else
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
- le16_to_cpu(ip->hw_id) == SDMA3_HWID)
- adev->sdma.num_instances++;
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
+ adev->sdma.num_instances++;
+ else
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
if (le16_to_cpu(ip->hw_id) == UMC_HWID)
adev->gmc.num_umc++;
@@ -1361,7 +1372,7 @@ union mall_info {
struct mall_info_v1_0 v1;
};
-int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union mall_info *mall_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8592d43a79b0..8890300766a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -99,10 +99,11 @@
* - 3.43.0 - Add device hot plug/unplug support
* - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
* - 3.45.0 - Add context ioctl stable pstate interface
- * * 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * * 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 46
+#define KMS_DRIVER_MINOR 47
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -1940,6 +1941,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{ PCI_DEVICE(0x1002, PCI_ANY_ID),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5a6857c44bb6..99a7855ab1bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -32,6 +32,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -54,58 +55,49 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
{
- struct amdgpu_fpriv *fpriv;
- uint32_t bus, dev, fn, i, domain;
- uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
- struct amdgpu_bo *root;
+ struct amdgpu_fpriv *fpriv = file->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+
+ uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ ktime_t usage[AMDGPU_HW_IP_NUM];
+ uint32_t bus, dev, fn, domain;
+ unsigned int hw_ip;
int ret;
- ret = amdgpu_file_to_fpriv(f, &fpriv);
- if (ret)
- return;
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- root = amdgpu_bo_ref(fpriv->vm.root.bo);
- if (!root)
+ ret = amdgpu_bo_reserve(vm->root.bo, false);
+ if (ret)
return;
- ret = amdgpu_bo_reserve(root, false);
- if (ret) {
- DRM_ERROR("Fail to reserve bo\n");
- return;
- }
- amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(root);
- amdgpu_bo_unref(&root);
+ amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_bo_unreserve(vm->root.bo);
- seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
- dev, fn, fpriv->vm.pasid);
- seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
- seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL);
- seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL);
- for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
- uint32_t count = amdgpu_ctx_num_entities[i];
- int idx = 0;
- uint64_t total = 0, min = 0;
- uint32_t perc, frac;
+ amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
- for (idx = 0; idx < count; idx++) {
- total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,
- i, idx, &min);
- if ((total == 0) || (min == 0))
- continue;
+ /*
+ * ******************************************************************
+ * For text output format description please see drm-usage-stats.rst!
+ * ******************************************************************
+ */
- perc = div64_u64(10000 * total, min);
- frac = perc % 100;
+ seq_printf(m, "pasid:\t%u\n", fpriv->vm.pasid);
+ seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
+ seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
+ seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
+ seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
+ seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
+ seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ if (!usage[hw_ip])
+ continue;
- seq_printf(m, "%s%d:\t%d.%d%%\n",
- amdgpu_ip_name[i],
- idx, perc/100, frac);
- }
+ seq_printf(m, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
+ ktime_to_ns(usage[hw_ip]));
}
}
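With this change the fdinfo output follows the common drm-usage-stats key/value format (drm-driver, drm-pdev, drm-client-id, drm-memory-* and drm-engine-* lines). A minimal user-space sketch for reading it is shown below; the fdinfo path layout is standard procfs, and the helper name is an assumption for illustration.

/* Minimal user-space sketch (assumes the standard procfs fdinfo layout). */
#include <stdio.h>
#include <string.h>

/* Print the per-engine busy time and memory use reported by a DRM fd. */
static int dump_engine_usage(const char *fdinfo_path)
{
	char line[256];
	FILE *f = fopen(fdinfo_path, "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		/* Keys follow Documentation/gpu/drm-usage-stats.rst. */
		if (!strncmp(line, "drm-engine-", 11) ||
		    !strncmp(line, "drm-memory-", 11))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}

For example, dump_engine_usage("/proc/self/fdinfo/3") on an open amdgpu fd; sampling twice and subtracting turns the monotonically increasing nanosecond counters into a utilisation percentage.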
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 652571267077..8ef31d687ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -296,8 +296,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED))
-
+ AMDGPU_GEM_CREATE_ENCRYPTED |
+ AMDGPU_GEM_CREATE_DISCARDABLE))
return -EINVAL;
/* reject invalid gem domains */
@@ -645,6 +645,8 @@ uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
pte_flag |= AMDGPU_PTE_WRITEABLE;
if (flags & AMDGPU_VM_PAGE_PRT)
pte_flag |= AMDGPU_PTE_PRT;
+ if (flags & AMDGPU_VM_PAGE_NOALLOC)
+ pte_flag |= AMDGPU_PTE_NOALLOC;
if (adev->gmc.gmc_funcs->map_mtype)
pte_flag |= amdgpu_gmc_map_mtype(adev,
@@ -658,7 +660,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
{
const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
+ AMDGPU_VM_PAGE_NOALLOC;
const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_PRT;
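The hunk above accepts the new AMDGPU_GEM_CREATE_DISCARDABLE flag from the GEM create ioctl. A hedged user-space sketch of requesting such a BO follows; the uapi struct and ioctl names are taken from amdgpu_drm.h as I understand it, and the DRM fd acquisition is assumed rather than shown by this patch.

/* Illustrative sketch: request a discardable VRAM BO via GEM create. */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* or the libdrm copy of this uapi header */

static int create_discardable_bo(int drm_fd, unsigned long long size,
				 unsigned int *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	/* New flag: contents may be discarded on eviction instead of copied. */
	args.in.domain_flags = AMDGPU_GEM_CREATE_DISCARDABLE;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;
	return 0;
}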
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 88b852b3a2cb..798c56214a23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -512,9 +512,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
*/
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ /* RAVEN */
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ /* RENOIR looks like RAVEN */
+ case IP_VERSION(9, 3, 0):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -525,12 +528,20 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
"Trusted Memory Zone (TMZ) feature enabled\n");
}
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- case CHIP_IP_DISCOVERY:
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ /* VANGOGH */
+ case IP_VERSION(10, 3, 1):
+ /* YELLOW_CARP */
+ case IP_VERSION(10, 3, 3):
+ /* GC 10.3.7 */
+ case IP_VERSION(10, 3, 7):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 497478f8a5d3..801f6fa692e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1152,7 +1152,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
- amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+ amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
goto out_suspend;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5444515c1476..2c82b1d5a0d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -567,6 +567,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bp->domain;
bo->allowed_domains = bo->preferred_domains;
if (bp->type != ttm_bo_type_kernel &&
+ !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -1018,7 +1019,9 @@ static const char *amdgpu_vram_names[] = {
"DDR3",
"DDR4",
"GDDR6",
- "DDR5"
+ "DDR5",
+ "LPDDR4",
+ "LPDDR5"
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 4c9cbdc66995..147b79c10cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,6 @@
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63)
-#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62)
#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 214e4e89a028..e9411c28d88b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1177,7 +1177,7 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->xgmi_context.context.initialized) {
+ if (!psp->xgmi_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 035891ec59d5..2de9309a4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -726,7 +726,9 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
- if (!amdgpu_ras_intr_triggered()) {
+ /* Only perform the RAS feature enable operation on the host side */
+ if (!amdgpu_sriov_vf(adev) &&
+ !amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
@@ -1523,7 +1525,9 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
*/
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
+ /* Fatal error events are handled on the host side */
+ if (amdgpu_sriov_vf(adev) ||
+ !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
return;
if (adev->nbio.ras &&
@@ -2270,10 +2274,14 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
adev->ras_hw_enabled = adev->ras_enabled = 0;
- if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
+ if (!adev->is_atom_fw ||
!amdgpu_ras_asic_supported(adev))
return;
+ if (!(amdgpu_sriov_vf(adev) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+ return;
+
if (!adev->gmc.xgmi.connected_to_cpu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
@@ -2285,15 +2293,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
- adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
-
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ } else {
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__GFX);
+ }
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
@@ -2637,6 +2651,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
struct amdgpu_ras_block_object *obj;
int r;
+ /* The guest side doesn't need to init RAS features */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 8e221a1ba937..42c1f050542f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -124,6 +124,10 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (amdgpu_sriov_vf(adev))
+ return AMDGPU_RAS_SUCCESS;
+
amdgpu_ras_reset_gpu(adev);
return AMDGPU_RAS_SUCCESS;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ec26edd4f4d8..be6f76a30ac6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -117,7 +117,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
abo = ttm_to_amdgpu_bo(bo);
- if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
+ if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
placement->num_placement = 0;
placement->num_busy_placement = 0;
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 65a4126135b0..c5f46d264b23 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -5111,7 +5111,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -6898,7 +6898,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -6919,23 +6919,6 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- tmp = 0;
- /* enable the doorbell if requested */
- if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_OFFSET, prop->doorbell_index);
-
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_SOURCE, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_HIT, 0);
- }
-
- mqd->cp_hqd_pq_doorbell_control = tmp;
-
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
@@ -6973,20 +6956,6 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- /* write the EOP addr */
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
- mqd->cp_hqd_eop_base_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
- mqd->cp_hqd_eop_base_addr_hi);
-
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
- mqd->cp_hqd_eop_control);
-
- /* enable doorbell? */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
- mqd->cp_hqd_pq_doorbell_control);
-
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
@@ -7005,6 +6974,19 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_wptr_hi);
}
+ /* disable doorbells */
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ /* write the EOP addr */
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
+ mqd->cp_hqd_eop_base_addr_lo);
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
+ mqd->cp_hqd_eop_base_addr_hi);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
+ mqd->cp_hqd_eop_control);
+
/* set the pointer to the MQD */
WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
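The RPTR_BLOCK_SIZE hunk above (repeated for gfx v11, v8 and v9 below) removes a pre-shift by 8: the field-insert macro already shifts the value into the field's bit position, so shifting it in the caller as well lands the value in the wrong bits. The generic model below illustrates the pattern; it is not the kernel REG_SET_FIELD macro itself, and the 5-bit field at bit 8 is a made-up example.

/*
 * Generic model of a field-insert helper: the helper applies the field
 * shift, so callers must pass the raw value, not a pre-shifted one.
 */
#include <stdint.h>

static inline uint32_t set_field(uint32_t reg, unsigned int shift,
				 uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * With a hypothetical 5-bit field at bit 8 (mask 0x1f00, shift 8),
 * passing "val << 8" here would shift the value twice and clobber the
 * field, which is the bug the hunks above correct for RPTR_BLOCK_SIZE.
 */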
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8773cbd1f03b..8c0a3fc7aaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4082,7 +4082,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 90f64219d291..7f0b18b0d4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3714,7 +3714,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -4490,7 +4490,7 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -5815,7 +5815,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* write cmd to Set CGCG Overrride */
+ /* write cmd to Set CGCG Override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 83639b5ea6a9..5349ca4d19e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2535,7 +2535,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -3514,7 +3514,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -3535,23 +3535,6 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- tmp = 0;
- /* enable the doorbell if requested */
- if (ring->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_OFFSET, ring->doorbell_index);
-
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_SOURCE, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_HIT, 0);
- }
-
- mqd->cp_hqd_pq_doorbell_control = tmp;
-
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b8c79789e1e4..9077dfccaf3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -613,6 +613,9 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ *flags &= ~AMDGPU_PTE_NOALLOC;
+ *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
if (mapping->flags & AMDGPU_PTE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 477f67d9b07c..a0c0b7d9f444 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -500,6 +500,9 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ *flags &= ~AMDGPU_PTE_NOALLOC;
+ *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
if (mapping->flags & AMDGPU_PTE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index d6d79e97def9..9e1ef81933ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -32,13 +32,10 @@
MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_cap.bin");
-MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_5_asd.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_8_asd.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
@@ -85,17 +82,17 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
- err = psp_init_ta_microcode(&adev->psp, chip_name);
- if (err)
- return err;
+ /* It's not necessary to load the RAS TA on the guest side */
+ if (!amdgpu_sriov_vf(adev)) {
+ err = psp_init_ta_microcode(&adev->psp, chip_name);
+ if (err)
+ return err;
+ }
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
- err = psp_init_asd_microcode(psp, chip_name);
- if (err)
- return err;
err = psp_init_toc_microcode(psp, chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index c6a8520053bb..9e18a2b22607 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -42,6 +42,7 @@
#include "soc15.h"
#include "soc15_common.h"
+#include "soc21.h"
static const struct amd_ip_funcs soc21_common_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 475f89700c74..60a81649cf12 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -166,7 +166,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200cd, 0xbef8007e,
+ 0xbf8200ce, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -212,304 +212,310 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x761e0000, 0xe0524100,
0x761e0100, 0xe0524200,
0x761e0200, 0xe0524300,
- 0x761e0300, 0xb8f22a05,
- 0x80728172, 0x8e728a72,
- 0xb8f61605, 0x80768176,
- 0x8e768676, 0x80727672,
- 0x80f2c072, 0xb8f31605,
- 0x80738173, 0x8e738473,
- 0x8e7a8273, 0xbefa00ff,
- 0x01000000, 0xbefc0073,
- 0xc031003c, 0x00000072,
- 0x80f2c072, 0xbf8c007f,
- 0x80fc907c, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff1, 0xb8f22a05,
- 0x80728172, 0x8e728a72,
- 0xb8f61605, 0x80768176,
- 0x8e768676, 0x80727672,
- 0xbefa0084, 0xbefa00ff,
- 0x01000000, 0xc0211cfc,
+ 0x761e0300, 0xbf8c0f70,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0x80f2c072,
+ 0xb8f31605, 0x80738173,
+ 0x8e738473, 0x8e7a8273,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0073, 0xc031003c,
+ 0x00000072, 0x80f2c072,
+ 0xbf8c007f, 0x80fc907c,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff1,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0xbefa0084,
+ 0xbefa00ff, 0x01000000,
+ 0xc0211cfc, 0x00000072,
+ 0x80728472, 0xc0211c3c,
0x00000072, 0x80728472,
- 0xc0211c3c, 0x00000072,
- 0x80728472, 0xc0211c7c,
+ 0xc0211c7c, 0x00000072,
+ 0x80728472, 0xc0211bbc,
0x00000072, 0x80728472,
- 0xc0211bbc, 0x00000072,
- 0x80728472, 0xc0211bfc,
+ 0xc0211bfc, 0x00000072,
+ 0x80728472, 0xc0211d3c,
0x00000072, 0x80728472,
- 0xc0211d3c, 0x00000072,
- 0x80728472, 0xc0211d7c,
+ 0xc0211d7c, 0x00000072,
+ 0x80728472, 0xc0211a3c,
0x00000072, 0x80728472,
- 0xc0211a3c, 0x00000072,
- 0x80728472, 0xc0211a7c,
+ 0xc0211a7c, 0x00000072,
+ 0x80728472, 0xc0211dfc,
0x00000072, 0x80728472,
- 0xc0211dfc, 0x00000072,
- 0x80728472, 0xc0211b3c,
+ 0xc0211b3c, 0x00000072,
+ 0x80728472, 0xc0211b7c,
0x00000072, 0x80728472,
- 0xc0211b7c, 0x00000072,
- 0x80728472, 0xbf8c007f,
- 0xbefc0073, 0xbefe006e,
- 0xbeff006f, 0x867375ff,
- 0x000003ff, 0xb9734803,
- 0x867375ff, 0xfffff800,
- 0x8f738b73, 0xb973a2c3,
- 0xb977f801, 0x8673ff71,
- 0xf0000000, 0x8f739c73,
- 0x8e739073, 0xbef60080,
- 0x87767376, 0x8673ff71,
- 0x08000000, 0x8f739b73,
- 0x8e738f73, 0x87767376,
- 0x8673ff74, 0x00800000,
- 0x8f739773, 0xb976f807,
- 0x8671ff71, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f768374, 0xb976e0c2,
- 0xbf800002, 0xb9740002,
- 0xbf8a0000, 0x95807370,
- 0xbf810000, 0x00000000,
+ 0xbf8c007f, 0xbefc0073,
+ 0xbefe006e, 0xbeff006f,
+ 0x867375ff, 0x000003ff,
+ 0xb9734803, 0x867375ff,
+ 0xfffff800, 0x8f738b73,
+ 0xb973a2c3, 0xb977f801,
+ 0x8673ff71, 0xf0000000,
+ 0x8f739c73, 0x8e739073,
+ 0xbef60080, 0x87767376,
+ 0x8673ff71, 0x08000000,
+ 0x8f739b73, 0x8e738f73,
+ 0x87767376, 0x8673ff74,
+ 0x00800000, 0x8f739773,
+ 0xb976f807, 0x8671ff71,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f768374,
+ 0xb976e0c2, 0xbf800002,
+ 0xb9740002, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820248,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf820254,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8fb1605,
- 0x807b817b, 0x8e7b867b,
- 0x807a7b7a, 0x807a7e7a,
- 0x827b807f, 0x867bff7b,
- 0x0000ffff, 0xc04b1c3d,
- 0x00000050, 0xbf8cc07f,
- 0xc04b1d3d, 0x00000060,
- 0xbf8cc07f, 0xc0431e7d,
- 0x00000074, 0xbf8cc07f,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x867aff7f, 0x08000000,
- 0x8f7a837a, 0x87777a77,
- 0x867aff7f, 0x70000000,
- 0x8f7a817a, 0x87777a77,
- 0xbef1007c, 0xbef00080,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0xbef1007c,
+ 0xbef00080, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840063,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf84005f,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840063, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf84005f, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2a05,
- 0x807b817b, 0x8e7b827b,
- 0x8e76887b, 0xbef600ff,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb2a05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
0x01000000, 0xbefc0084,
0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
@@ -566,15 +572,11 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x701d0300, 0x807c847c,
0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200da,
+ 0xbf9c0000, 0xbf8200c7,
0xbef4007e, 0x8675ff7f,
0x0000ffff, 0x8775ff75,
0x00040000, 0xbef60080,
0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
0x866eff7f, 0x04000000,
0xbf84001e, 0xbefe00c1,
0xbeff00c1, 0xb8ef4306,
@@ -591,28 +593,28 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x781d0000, 0x807cff7c,
0x00000200, 0x8078ff78,
0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
@@ -663,90 +665,101 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc00b1c37, 0x00000050,
0xc00b1d37, 0x00000060,
0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x866fff6d,
- 0xf8000000, 0x8f6f9b6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x04000000, 0x8f6f9a6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff7a, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e837a, 0xb96ee0c2,
- 0xbf800002, 0xb97a0002,
- 0xbf8a0000, 0x95806f6c,
- 0xbf810000, 0x00000000,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e837a,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb97a0002, 0xbf8a0000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
- 0xbf820001, 0xbf8201cd,
+ 0xbf820001, 0xbf8201f1,
0xb0804004, 0xb978f802,
- 0x8a788678, 0xb96ef801,
- 0x876eff6e, 0x00000800,
- 0xbf840003, 0x876eff78,
+ 0x8a78ff78, 0x00020006,
+ 0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
- 0xb97bf803, 0x876eff7b,
- 0x00000400, 0xbf850033,
- 0x876eff7b, 0x00000100,
- 0xbf840002, 0x8878ff78,
- 0x00002000, 0x8a77ff77,
- 0xff000000, 0xb96ef807,
- 0x876fff6e, 0x02000000,
- 0x8f6f866f, 0x88776f77,
- 0x876fff6e, 0x003f8000,
- 0x8f6f896f, 0x88776f77,
- 0x8a6eff6e, 0x023f8000,
- 0xb9eef807, 0xb97af812,
+ 0x876eff6d, 0x00ff0000,
+ 0xbf85001e, 0x876eff7b,
+ 0x00000400, 0xbf850057,
+ 0xbf8e0010, 0xb97bf803,
+ 0xbf82fffa, 0x876eff7b,
+ 0x00000900, 0xbf850015,
+ 0x876eff7b, 0x000071ff,
+ 0xbf840008, 0x876fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1d87, 0xb96ff801,
+ 0x8f6e8c6e, 0x876e6f6e,
+ 0xbf85000a, 0x876eff6d,
+ 0x00ff0000, 0xbf850007,
+ 0xb96ef801, 0x876eff6e,
+ 0x00000800, 0xbf850003,
+ 0x876eff7b, 0x00000400,
+ 0xbf85003c, 0x8a77ff77,
+ 0xff000000, 0xb97af807,
+ 0x877bff7a, 0x02000000,
+ 0x8f7b867b, 0x88777b77,
+ 0x877bff7a, 0x003f8000,
+ 0x8f7b897b, 0x88777b77,
+ 0x8a7aff7a, 0x023f8000,
+ 0xb9faf807, 0xb97af812,
0xb97bf813, 0x8ffa887a,
- 0xf4051bbd, 0xfa000000,
- 0xbf8cc07f, 0xf4051ebd,
- 0xfa000008, 0xbf8cc07f,
- 0x87ee6e6e, 0xbf840001,
- 0xbe80206e, 0xb97bf803,
- 0x877bff7b, 0x000001ff,
+ 0xf4011bbd, 0xfa000010,
+ 0xbf8cc07f, 0x8f6e976e,
+ 0x8a77ff77, 0x00800000,
+ 0x88776e77, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0x876eff6d, 0x01ff0000,
+ 0xbf850005, 0x8878ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x876eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x876dff6d,
- 0x0000ffff, 0x906e8977,
- 0x876fff6e, 0x003f8000,
- 0x906e8677, 0x876eff6e,
- 0x02000000, 0x886e6f6e,
- 0xb9eef807, 0x87fe7e7e,
+ 0x0000ffff, 0x907a8977,
+ 0x877bff7a, 0x003f8000,
+ 0x907a8677, 0x877aff7a,
+ 0x02000000, 0x887a7b7a,
+ 0xb9faf807, 0x87fe7e7e,
0x87ea6a6a, 0xb9f8f802,
0xbe80226c, 0x876dff6d,
0x0000ffff, 0xbefa0380,
- 0xb9fa0283, 0xb97a2c07,
- 0x8f7a9a7a, 0x886d7a6d,
- 0xb97a03c7, 0x8f7a997a,
- 0x886d7a6d, 0xb97a0647,
- 0x8f7a987a, 0x886d7a6d,
- 0xb97af807, 0x877aff7a,
- 0x00007fff, 0xb9faf807,
- 0xbeee037e, 0xbeef037f,
- 0xbefe0480, 0xbf900004,
- 0xbf8e0002, 0xbf88fffe,
- 0xb97b02dc, 0x8f7b997b,
- 0x887b7b7f, 0xb97a2a05,
+ 0xb9fa0283, 0x8a77ff77,
+ 0xff000000, 0xb97af807,
+ 0x877bff7a, 0x02000000,
+ 0x8f7b867b, 0x88777b77,
+ 0x877bff7a, 0x003f8000,
+ 0x8f7b897b, 0x88777b77,
+ 0x8a7aff7a, 0x023f8000,
+ 0xb9faf807, 0xbeee037e,
+ 0xbeef037f, 0xbefe0480,
+ 0xbf900004, 0xbf8e0002,
+ 0xbf88fffe, 0x877aff7f,
+ 0x04000000, 0x8f7a857a,
+ 0x886d7a6d, 0xb97b02dc,
+ 0x8f7b997b, 0xb97a2a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
- 0x877bff7f, 0x0000ffff,
- 0x807aff7a, 0x00000200,
- 0x807a7e7a, 0x827b807b,
- 0xf4491c3d, 0xfa000050,
- 0xf4491d3d, 0xfa000060,
- 0xf4411e7d, 0xfa000074,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x10807fac,
- 0x877aff7f, 0x08000000,
- 0x907a837a, 0x88777a77,
- 0x877aff7f, 0x70000000,
- 0x907a817a, 0x88777a77,
- 0xbef1037c, 0xbef00380,
- 0xb97302dc, 0x8f739973,
- 0x8873737f, 0xb97bf816,
+ 0xb97b1e06, 0x8f7b8a7b,
+ 0x807a7b7a, 0x877bff7f,
+ 0x0000ffff, 0x807aff7a,
+ 0x00000200, 0x807a7e7a,
+ 0x827b807b, 0xf4491c3d,
+ 0xfa000050, 0xf4491d3d,
+ 0xfa000060, 0xf4411e7d,
+ 0xfa000074, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0xbef1037c,
+ 0xbef00380, 0xb97302dc,
+ 0x8f739973, 0xb97bf816,
0xba80f816, 0x00000000,
0xbefe03c1, 0x907c9973,
0x877c817c, 0xbf06817c,
@@ -763,7 +776,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xe0704100, 0x705d0100,
0xe0704200, 0x705d0200,
0xe0704300, 0x705d0300,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -776,8 +789,9 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611b3a, 0xf8000000,
0x80708470, 0xbefc037e,
+ 0x8a7aff6d, 0x80000000,
0xbefe037c, 0xbefc0370,
- 0xf4611b7a, 0xf8000000,
+ 0xf4611eba, 0xf8000000,
0x80708470, 0xbefc037e,
0xbefe037c, 0xbefc0370,
0xf4611bba, 0xf8000000,
@@ -838,10 +852,10 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf820001, 0xbeff03c1,
0xb97b4306, 0x877bc17b,
0xbf840044, 0xbf8a0000,
- 0x877aff73, 0x04000000,
+ 0x877aff6d, 0x80000000,
0xbf840040, 0x8f7b867b,
0x8f7b827b, 0xbef6037b,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -877,7 +891,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbef003ff, 0x00000200,
0xbeff0380, 0xbf820003,
0xbef003ff, 0x00000400,
- 0xbeff03c1, 0xb97b2a05,
+ 0xbeff03c1, 0xb97b3a05,
0x807b817b, 0x8f7b827b,
0x907c9973, 0x877c817c,
0xbf06817c, 0xbf850017,
@@ -894,7 +908,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf0a7b7c, 0xbf85ffef,
0xbf820025, 0xbef603ff,
0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840020,
+ 0xbf0a7b7c, 0xbf840011,
0x7e008700, 0x7e028701,
0x7e048702, 0x7e068703,
0xe0704000, 0x705d0000,
@@ -911,71 +925,69 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x705d0000, 0x807c817c,
0x8070ff70, 0x00000080,
0xbf0a7b7c, 0xbf85fff8,
- 0xbf820151, 0xbef4037e,
+ 0xbf820144, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
- 0x10807fac, 0x876eff7f,
- 0x08000000, 0x906e836e,
- 0x88776e77, 0x876eff7f,
- 0x70000000, 0x906e816e,
- 0x88776e77, 0xb97202dc,
- 0x8f729972, 0x8872727f,
- 0x876eff7f, 0x04000000,
- 0xbf840034, 0xbefe03c1,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb96f4306,
- 0x876fc16f, 0xbf840029,
- 0x8f6f866f, 0x8f6f826f,
- 0xbef6036f, 0xb9782a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x8078ff78,
- 0x00000080, 0xbef603ff,
- 0x01000000, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850009,
- 0xe0310000, 0x781d0000,
- 0x807cff7c, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff8,
- 0xbf820008, 0xe0310000,
- 0x781d0000, 0x807cff7c,
- 0x00000100, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fff8, 0xbef80380,
+ 0x10807fac, 0xb97202dc,
+ 0x8f729972, 0x876eff7f,
+ 0x04000000, 0xbf840034,
0xbefe03c1, 0x907c9972,
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f2a05, 0x806f816f,
- 0x8f6f826f, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbf850021, 0xbef603ff,
- 0x01000000, 0xbeee0378,
+ 0xb96f4306, 0x876fc16f,
+ 0xbf840029, 0x8f6f866f,
+ 0x8f6f826f, 0xbef6036f,
+ 0xb9783a05, 0x80788178,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb96e1e06,
+ 0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbefc0384, 0xe0304000,
- 0x785d0000, 0xe0304080,
- 0x785d0100, 0xe0304100,
- 0x785d0200, 0xe0304180,
- 0x785d0300, 0xbf8c3f70,
- 0x7e008500, 0x7e028501,
- 0x7e048502, 0x7e068503,
- 0x807c847c, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85ffee, 0xe0304000,
- 0x6e5d0000, 0xe0304080,
- 0x6e5d0100, 0xe0304100,
- 0x6e5d0200, 0xe0304180,
- 0x6e5d0300, 0xbf820032,
+ 0x8078ff78, 0x00000080,
+ 0xbef603ff, 0x01000000,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbefc0380,
+ 0xbf850009, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbf820008,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000100,
+ 0x8078ff78, 0x00000100,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbef80380, 0xbefe03c1,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f3a05,
+ 0x806f816f, 0x8f6f826f,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850024,
+ 0xbef603ff, 0x01000000,
+ 0xbeee0378, 0x8078ff78,
+ 0x00000200, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840050,
+ 0xe0304000, 0x785d0000,
+ 0xe0304080, 0x785d0100,
+ 0xe0304100, 0x785d0200,
+ 0xe0304180, 0x785d0300,
+ 0xbf8c3f70, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xe0304000, 0x6e5d0000,
+ 0xe0304080, 0x6e5d0100,
+ 0xe0304100, 0x6e5d0200,
+ 0xe0304180, 0x6e5d0300,
+ 0xbf8c3f70, 0xbf820034,
0xbef603ff, 0x01000000,
0xbeee0378, 0x8078ff78,
0x00000400, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840012,
0xe0304000, 0x785d0000,
0xe0304100, 0x785d0100,
0xe0304200, 0x785d0200,
@@ -998,7 +1010,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x6e5d0100, 0xe0304200,
0x6e5d0200, 0xe0304300,
0x6e5d0300, 0xbf8c3f70,
- 0xb9782a05, 0x80788178,
+ 0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
@@ -1025,7 +1037,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbe8c310c, 0xbe8e310e,
0xbf06807c, 0xbf84fff0,
0xba80f801, 0x00000000,
- 0xbf8a0000, 0xb9782a05,
+ 0xbf8a0000, 0xb9783a05,
0x80788178, 0xbf0d9972,
0xbf850002, 0x8f788978,
0xbf820001, 0x8f788a78,
@@ -1060,270 +1072,272 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb96e2a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
- 0x8f6e8a6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x876fff6f,
- 0x0000ffff, 0xf4091c37,
- 0xfa000050, 0xf4091d37,
- 0xfa000060, 0xf4011e77,
- 0xfa000074, 0xbf8cc07f,
- 0x876fff6d, 0xfc000000,
- 0x906f9a6f, 0x8f6f906f,
- 0xbeee0380, 0x886e6f6e,
- 0x876fff6d, 0x02000000,
- 0x906f996f, 0x8f6f8f6f,
- 0x886e6f6e, 0x876fff6d,
- 0x01000000, 0x906f986f,
- 0x8f6f996f, 0x886e6f6e,
- 0x876fff7a, 0x00800000,
- 0x906f976f, 0xb9eef807,
- 0x876dff6d, 0x0000ffff,
- 0x87fe7e7e, 0x87ea6a6a,
- 0xb9faf802, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x8f6e8a6e, 0xb96f1e06,
+ 0x8f6f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x876fff6f, 0x0000ffff,
+ 0xf4091c37, 0xfa000050,
+ 0xf4091d37, 0xfa000060,
+ 0xf4011e77, 0xfa000074,
+ 0xbf8cc07f, 0x906e8977,
+ 0x876fff6e, 0x003f8000,
+ 0x906e8677, 0x876eff6e,
+ 0x02000000, 0x886e6f6e,
+ 0xb9eef807, 0x876dff6d,
+ 0x0000ffff, 0x87fe7e7e,
+ 0x87ea6a6a, 0xb9faf802,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202c4,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf8202d0,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
- 0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840064, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840060, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840064,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf840060,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2a05,
- 0x807b817b, 0x8e7b827b,
- 0x8e76887b, 0xbef600ff,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb2a05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
0x01000000, 0xbefc0084,
0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
@@ -1440,15 +1454,11 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x701d0300, 0x807c847c,
0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf820106,
+ 0xbf9c0000, 0xbf8200e3,
0xbef4007e, 0x8675ff7f,
0x0000ffff, 0x8775ff75,
0x00040000, 0xbef60080,
0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
0x866eff7f, 0x04000000,
0xbf84001f, 0xbefe00c1,
0xbeff00c1, 0xb8ef4306,
@@ -1466,26 +1476,14 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x807cff7c, 0x00000200,
0x8078ff78, 0x00000200,
0xbf0a6f7c, 0xbf85fff6,
- 0xbef80080, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x8e76886f, 0xbef90076,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbef30079,
- 0x8079ff79, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0x806fff6f, 0x00008000,
- 0xe0524000, 0x791d0000,
- 0xe0524100, 0x791d0100,
- 0xe0524200, 0x791d0200,
- 0xe0524300, 0x791d0300,
- 0x8079ff79, 0x00000400,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0xe0524000,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
@@ -1494,20 +1492,24 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffdb, 0xbf9c0000,
- 0xe0524000, 0x731d0000,
- 0xe0524100, 0x731d0100,
- 0xe0524200, 0x731d0200,
- 0xe0524300, 0x731d0300,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xb8f82a05,
+ 0xbf85ffee, 0xbefc0080,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0xd3d94000, 0x18000100,
+ 0xd3d94001, 0x18000101,
+ 0xd3d94002, 0x18000102,
+ 0xd3d94003, 0x18000103,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffea, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0x8e788178, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
@@ -1559,224 +1561,268 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc00b1c37, 0x00000050,
0xc00b1d37, 0x00000060,
0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x866fff6d,
- 0xf8000000, 0x8f6f9b6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x04000000, 0x8f6f9a6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff7a, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e837a, 0xb96ee0c2,
- 0xbf800002, 0xb97a0002,
- 0xbf8a0000, 0x95806f6c,
- 0xbf810000, 0x00000000,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e837a,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb97a0002, 0xbf8a0000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202ce,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf8202db,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2985, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
+ 0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840064,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf840060,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840064, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840060, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
+ 0xb8fb2b05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf84006d,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
+ 0x10000000, 0xbf850051,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1796,31 +1842,51 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
- 0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2b05,
- 0x807b817b, 0x8e7b827b,
- 0xbef600ff, 0x01000000,
- 0xbefc0084, 0xbf0a7b7c,
- 0xbf84006d, 0xbf11017c,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xb8fb2985,
+ 0x807b817b, 0x8e7b837b,
+ 0xb8fa2b05, 0x807a817a,
+ 0x8e7a827a, 0x80fb7a7b,
+ 0x867b7b7b, 0xbf84007a,
0x807bff7b, 0x00001000,
+ 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850051, 0xbe840080,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1859,233 +1925,178 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffb1,
- 0xbf9c0000, 0xbf820012,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffef, 0xbf9c0000,
- 0xb8fb2985, 0x807b817b,
- 0x8e7b837b, 0xb8fa2b05,
- 0x807a817a, 0x8e7a827a,
- 0x80fb7a7b, 0x867b7b7b,
- 0xbf84007a, 0x807bff7b,
- 0x00001000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf820101,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200ee, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2b05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2b05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb8ef2985, 0x806f816f,
+ 0x8e6f836f, 0xb8f92b05,
+ 0x80798179, 0x8e798279,
+ 0x80ef796f, 0x866f6f6f,
+ 0xbf84001a, 0x806fff6f,
+ 0x00008000, 0xbefc0080,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xd3d94000, 0x18000100,
+ 0xd3d94001, 0x18000101,
+ 0xd3d94002, 0x18000102,
+ 0xd3d94003, 0x18000103,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xb8ef2985,
- 0x806f816f, 0x8e6f836f,
- 0xb8f92b05, 0x80798179,
- 0x8e798279, 0x80ef796f,
- 0x866f6f6f, 0xbf84001a,
- 0x806fff6f, 0x00008000,
- 0xbefc0080, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffea,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x80f8c078, 0xb8ef1605,
- 0x806f816f, 0x8e6f846f,
- 0x8e76826f, 0xbef600ff,
- 0x01000000, 0xbefc006f,
- 0xc031003a, 0x00000078,
- 0x80f8c078, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe802d00, 0xbe822d02,
- 0xbe842d04, 0xbe862d06,
- 0xbe882d08, 0xbe8a2d0a,
- 0xbe8c2d0c, 0xbe8e2d0e,
- 0xbf06807c, 0xbf84fff0,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xbf85ffea, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2985,
- 0x806e816e, 0x8e6e8a6e,
- 0x8e6e816e, 0xb8ef1605,
- 0x806f816f, 0x8e6f866f,
- 0x806e6f6e, 0x806e746e,
- 0x826f8075, 0x866fff6f,
- 0x0000ffff, 0xc00b1c37,
- 0x00000050, 0xc00b1d37,
- 0x00000060, 0xc0031e77,
- 0x00000074, 0xbf8cc07f,
- 0x866fff6d, 0xf8000000,
- 0x8f6f9b6f, 0x8e6f906f,
- 0xbeee0080, 0x876e6f6e,
- 0x866fff6d, 0x04000000,
- 0x8f6f9a6f, 0x8e6f8f6f,
- 0x876e6f6e, 0x866fff7a,
- 0x00800000, 0x8f6f976f,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2985, 0x806e816e,
+ 0x8e6e8a6e, 0x8e6e816e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
0xb96ef807, 0x866dff6d,
0x0000ffff, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0x95806f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf8201cf,
+ 0xbf820001, 0xbf82021c,
0xb0804004, 0xb978f802,
- 0x8a788678, 0xb96ef801,
- 0x876eff6e, 0x00000800,
- 0xbf840003, 0x876eff78,
+ 0x8a78ff78, 0x00020006,
+ 0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
- 0xb97bf803, 0x876eff7b,
- 0x00000400, 0xbf85001d,
- 0x876eff7b, 0x00000100,
- 0xbf840002, 0x8878ff78,
- 0x00002000, 0xb97af812,
+ 0x876eff6d, 0x00ff0000,
+ 0xbf85001e, 0x876eff7b,
+ 0x00000400, 0xbf850041,
+ 0xbf8e0010, 0xb97bf803,
+ 0xbf82fffa, 0x876eff7b,
+ 0x00000900, 0xbf850015,
+ 0x876eff7b, 0x000071ff,
+ 0xbf840008, 0x876fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1d87, 0xb96ff801,
+ 0x8f6e8c6e, 0x876e6f6e,
+ 0xbf85000a, 0x876eff6d,
+ 0x00ff0000, 0xbf850007,
+ 0xb96ef801, 0x876eff6e,
+ 0x00000800, 0xbf850003,
+ 0x876eff7b, 0x00000400,
+ 0xbf850026, 0xb97af812,
0xb97bf813, 0x8ffa887a,
- 0xf4051bbd, 0xfa000000,
- 0xbf8cc07f, 0xf4051ebd,
- 0xfa000008, 0xbf8cc07f,
- 0x87ee6e6e, 0xbf840001,
- 0xbe80206e, 0xb97bf803,
- 0x877bff7b, 0x000001ff,
+ 0xf4011bbd, 0xfa000010,
+ 0xbf8cc07f, 0x8f6e976e,
+ 0x8a77ff77, 0x00800000,
+ 0x88776e77, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0x876eff6d, 0x01ff0000,
+ 0xbf850005, 0x8878ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x876eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
@@ -2095,37 +2106,55 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9fa0283, 0xbeee037e,
0xbeef037f, 0xbefe0480,
0xbf900004, 0xbf8cc07f,
+ 0x877aff7f, 0x04000000,
+ 0x8f7a857a, 0x886d7a6d,
+ 0xbefa037e, 0x877bff7f,
+ 0x0000ffff, 0xbefe03c1,
+ 0xbeff03c1, 0xdc5f8000,
+ 0x007a0000, 0x7e000280,
+ 0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0x887b7b7f, 0xb97a2a05,
- 0x807a817a, 0xbf0d997b,
- 0xbf850002, 0x8f7a897a,
- 0xbf820001, 0x8f7a8a7a,
+ 0xb97a2a05, 0x807a817a,
+ 0xbf0d997b, 0xbf850002,
+ 0x8f7a897a, 0xbf820001,
+ 0x8f7a8a7a, 0xb97b1e06,
+ 0x8f7b8a7b, 0x807a7b7a,
0x877bff7f, 0x0000ffff,
0x807aff7a, 0x00000200,
0x807a7e7a, 0x827b807b,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x10807fac,
- 0x877aff7f, 0x08000000,
- 0x907a837a, 0x88777a77,
- 0x877aff7f, 0x70000000,
- 0x907a817a, 0x88777a77,
- 0xbef1037c, 0xbef00380,
- 0xb97302dc, 0x8f739973,
- 0x8873737f, 0xbefe03c1,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe03ff, 0x00003fff,
+ 0xbeff0380, 0xdc5f8040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe037a,
+ 0xbeff037b, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0xbef1037c,
+ 0xbef00380, 0xb97302dc,
+ 0x8f739973, 0xbefe03c1,
0x907c9973, 0x877c817c,
0xbf06817c, 0xbf850002,
0xbeff0380, 0xbf820002,
- 0xbeff03c1, 0xbf82000b,
+ 0xbeff03c1, 0xbf820009,
0xbef603ff, 0x01000000,
- 0xe0704000, 0x705d0000,
0xe0704080, 0x705d0100,
0xe0704100, 0x705d0200,
0xe0704180, 0x705d0300,
- 0xbf82000a, 0xbef603ff,
- 0x01000000, 0xe0704000,
- 0x705d0000, 0xe0704100,
+ 0xbf820008, 0xbef603ff,
+ 0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
0x705d0300, 0xb9702a05,
@@ -2140,8 +2169,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefc0380, 0xd7610002,
0x0000f871, 0x807c817c,
0xd7610002, 0x0000f86c,
- 0x807c817c, 0xd7610002,
- 0x0000f86d, 0x807c817c,
+ 0x807c817c, 0x8a7aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000f87a, 0x807c817c,
0xd7610002, 0x0000f86e,
0x807c817c, 0xd7610002,
0x0000f86f, 0x807c817c,
@@ -2156,160 +2186,157 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x0000f871, 0x807c817c,
0xb971f815, 0xd7610002,
0x0000f871, 0x807c817c,
+ 0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
- 0x705d0200, 0xb9702a05,
- 0x80708170, 0xbf0d9973,
- 0xbf850002, 0x8f708970,
- 0xbf820001, 0x8f708a70,
- 0xb97a1e06, 0x8f7a8a7a,
- 0x80707a70, 0xbef603ff,
- 0x01000000, 0xbef90380,
- 0xbefc0380, 0xbf800000,
- 0xbe802f00, 0xbe822f02,
- 0xbe842f04, 0xbe862f06,
- 0xbe882f08, 0xbe8a2f0a,
- 0xbe8c2f0c, 0xbe8e2f0e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x705d0200, 0xbefe03c1,
+ 0xb9702a05, 0x80708170,
+ 0xbf0d9973, 0xbf850002,
+ 0x8f708970, 0xbf820001,
+ 0x8f708a70, 0xb97a1e06,
+ 0x8f7a8a7a, 0x80707a70,
+ 0xbef603ff, 0x01000000,
+ 0xbef90380, 0xbefc0380,
+ 0xbf800000, 0xbe802f00,
+ 0xbe822f02, 0xbe842f04,
+ 0xbe862f06, 0xbe882f08,
+ 0xbe8a2f0a, 0xbe8c2f0c,
+ 0xbe8e2f0e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbf840006,
- 0xe0704000, 0x705d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90380, 0x7e040280,
- 0x807c907c, 0xbf0aff7c,
- 0x00000060, 0xbf85ffbc,
- 0xbe802f00, 0xbe822f02,
- 0xbe842f04, 0xbe862f06,
- 0xbe882f08, 0xbe8a2f0a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbf840006, 0xe0704000,
+ 0x705d0200, 0x8070ff70,
+ 0x00000080, 0xbef90380,
+ 0x7e040280, 0x807c907c,
+ 0xbf0aff7c, 0x00000060,
+ 0xbf85ffbc, 0xbe802f00,
+ 0xbe822f02, 0xbe842f04,
+ 0xbe862f06, 0xbe882f08,
+ 0xbe8a2f0a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0704000, 0x705d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0704000,
+ 0x705d0200, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb97b4306,
+ 0x877bc17b, 0xbf840044,
+ 0xbf8a0000, 0x877aff6d,
+ 0x80000000, 0xbf840040,
+ 0x8f7b867b, 0x8f7b827b,
+ 0xbef6037b, 0xb9703a05,
+ 0x80708170, 0xbf0d9973,
+ 0xbf850002, 0x8f708970,
+ 0xbf820001, 0x8f708a70,
+ 0xb97a1e06, 0x8f7a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef603ff,
+ 0x01000000, 0xd7650000,
+ 0x000100c1, 0xd7660000,
+ 0x000200c1, 0x16000084,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbefc0380,
+ 0xbf850012, 0xbe8303ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xe0704000,
+ 0x705d0100, 0x807c037c,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7c, 0xbf85fff4,
+ 0xbf820011, 0xbe8303ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xe0704000,
+ 0x705d0100, 0x807c037c,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7c, 0xbf85fff4,
0xbefe03c1, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbf850002, 0xbeff0380,
- 0xbf820001, 0xbeff03c1,
- 0xb97b4306, 0x877bc17b,
- 0xbf840044, 0xbf8a0000,
- 0x877aff73, 0x04000000,
- 0xbf840040, 0x8f7b867b,
- 0x8f7b827b, 0xbef6037b,
- 0xb9702a05, 0x80708170,
- 0xbf0d9973, 0xbf850002,
- 0x8f708970, 0xbf820001,
- 0x8f708a70, 0xb97a1e06,
- 0x8f7a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef603ff, 0x01000000,
- 0xd7650000, 0x000100c1,
- 0xd7660000, 0x000200c1,
- 0x16000084, 0x907c9973,
+ 0xbf850004, 0xbef003ff,
+ 0x00000200, 0xbeff0380,
+ 0xbf820003, 0xbef003ff,
+ 0x00000400, 0xbeff03c1,
+ 0xb97b3a05, 0x807b817b,
+ 0x8f7b827b, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850012,
- 0xbe8303ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf8c0000,
- 0xe0704000, 0x705d0100,
- 0x807c037c, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7c,
- 0xbf85fff4, 0xbf820011,
- 0xbe8303ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf8c0000,
- 0xe0704000, 0x705d0100,
- 0x807c037c, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7c,
- 0xbf85fff4, 0xbefe03c1,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850004,
- 0xbef003ff, 0x00000200,
- 0xbeff0380, 0xbf820003,
- 0xbef003ff, 0x00000400,
- 0xbeff03c1, 0xb97b2a05,
- 0x807b817b, 0x8f7b827b,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850017,
+ 0xbf850017, 0xbef603ff,
+ 0x01000000, 0xbefc0384,
+ 0xbf0a7b7c, 0xbf840037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0704000, 0x705d0000,
+ 0xe0704080, 0x705d0100,
+ 0xe0704100, 0x705d0200,
+ 0xe0704180, 0x705d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf820025,
0xbef603ff, 0x01000000,
0xbefc0384, 0xbf0a7b7c,
- 0xbf840037, 0x7e008700,
+ 0xbf840011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0704000,
- 0x705d0000, 0xe0704080,
- 0x705d0100, 0xe0704100,
- 0x705d0200, 0xe0704180,
+ 0x705d0000, 0xe0704100,
+ 0x705d0100, 0xe0704200,
+ 0x705d0200, 0xe0704300,
0x705d0300, 0x807c847c,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffef,
- 0xbf820025, 0xbef603ff,
- 0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840020,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb97b1e06, 0x877bc17b,
+ 0xbf84000c, 0x8f7b837b,
+ 0x807b7c7b, 0xbefe03c1,
+ 0xbeff0380, 0x7e008700,
0xe0704000, 0x705d0000,
- 0xe0704100, 0x705d0100,
- 0xe0704200, 0x705d0200,
- 0xe0704300, 0x705d0300,
- 0x807c847c, 0x8070ff70,
- 0x00000400, 0xbf0a7b7c,
- 0xbf85ffef, 0xb97b1e06,
- 0x877bc17b, 0xbf84000c,
- 0x8f7b837b, 0x807b7c7b,
- 0xbefe03c1, 0xbeff0380,
- 0x7e008700, 0xe0704000,
- 0x705d0000, 0x807c817c,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7c, 0xbf85fff8,
- 0xbf82013c, 0xbef4037e,
- 0x8775ff7f, 0x0000ffff,
- 0x8875ff75, 0x00040000,
- 0xbef60380, 0xbef703ff,
- 0x10807fac, 0x876eff7f,
- 0x08000000, 0x906e836e,
- 0x88776e77, 0x876eff7f,
- 0x70000000, 0x906e816e,
- 0x88776e77, 0xb97202dc,
- 0x8f729972, 0x8872727f,
+ 0x807c817c, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7c,
+ 0xbf85fff8, 0xbf82013b,
+ 0xbef4037e, 0x8775ff7f,
+ 0x0000ffff, 0x8875ff75,
+ 0x00040000, 0xbef60380,
+ 0xbef703ff, 0x10807fac,
+ 0xb97202dc, 0x8f729972,
0x876eff7f, 0x04000000,
0xbf840034, 0xbefe03c1,
0x907c9972, 0x877c817c,
@@ -2318,7 +2345,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbeff03c1, 0xb96f4306,
0x876fc16f, 0xbf840029,
0x8f6f866f, 0x8f6f826f,
- 0xbef6036f, 0xb9782a05,
+ 0xbef6036f, 0xb9783a05,
0x80788178, 0xbf0d9972,
0xbf850002, 0x8f788978,
0xbf820001, 0x8f788a78,
@@ -2342,13 +2369,14 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f2a05, 0x806f816f,
+ 0xb96f3a05, 0x806f816f,
0x8f6f826f, 0x907c9972,
0x877c817c, 0xbf06817c,
- 0xbf850021, 0xbef603ff,
+ 0xbf850024, 0xbef603ff,
0x01000000, 0xbeee0378,
0x8078ff78, 0x00000200,
- 0xbefc0384, 0xe0304000,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840050, 0xe0304000,
0x785d0000, 0xe0304080,
0x785d0100, 0xe0304100,
0x785d0200, 0xe0304180,
@@ -2361,94 +2389,97 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x6e5d0000, 0xe0304080,
0x6e5d0100, 0xe0304100,
0x6e5d0200, 0xe0304180,
- 0x6e5d0300, 0xbf820032,
- 0xbef603ff, 0x01000000,
- 0xbeee0378, 0x8078ff78,
- 0x00000400, 0xbefc0384,
+ 0x6e5d0300, 0xbf8c3f70,
+ 0xbf820034, 0xbef603ff,
+ 0x01000000, 0xbeee0378,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840012, 0xe0304000,
+ 0x785d0000, 0xe0304100,
+ 0x785d0100, 0xe0304200,
+ 0x785d0200, 0xe0304300,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb96f1e06,
+ 0x876fc16f, 0xbf84000e,
+ 0x8f6f836f, 0x806f7c6f,
+ 0xbefe03c1, 0xbeff0380,
0xe0304000, 0x785d0000,
- 0xe0304100, 0x785d0100,
- 0xe0304200, 0x785d0200,
- 0xe0304300, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb96f1e06, 0x876fc16f,
- 0xbf84000e, 0x8f6f836f,
- 0x806f7c6f, 0xbefe03c1,
- 0xbeff0380, 0xe0304000,
- 0x785d0000, 0xbf8c3f70,
- 0x7e008500, 0x807c817c,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff7,
- 0xbeff03c1, 0xe0304000,
- 0x6e5d0000, 0xe0304100,
- 0x6e5d0100, 0xe0304200,
- 0x6e5d0200, 0xe0304300,
- 0x6e5d0300, 0xbf8c3f70,
- 0xb9782a05, 0x80788178,
+ 0x807c817c, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff7, 0xbeff03c1,
+ 0xe0304000, 0x6e5d0000,
+ 0xe0304100, 0x6e5d0100,
+ 0xe0304200, 0x6e5d0200,
+ 0xe0304300, 0x6e5d0300,
+ 0xbf8c3f70, 0xb9783a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef603ff,
+ 0x01000000, 0xbefc03ff,
+ 0x0000006c, 0x80f89078,
+ 0xf429003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc847c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0x80f8a078,
+ 0xf42d003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc887c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0x80f8c078,
+ 0xf431003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0xbe883108,
+ 0xbe8a310a, 0xbe8c310c,
+ 0xbe8e310e, 0xbf06807c,
+ 0xbf84fff0, 0xba80f801,
+ 0x00000000, 0xbf8a0000,
+ 0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef603ff, 0x01000000,
- 0xbefc03ff, 0x0000006c,
- 0x80f89078, 0xf429003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc847c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0x80f8a078, 0xf42d003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc887c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0x80f8c078, 0xf431003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0xbe883108, 0xbe8a310a,
- 0xbe8c310c, 0xbe8e310e,
- 0xbf06807c, 0xbf84fff0,
- 0xba80f801, 0x00000000,
- 0xbf8a0000, 0xb9782a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbefc036f, 0xbefe0370,
- 0xbeff0371, 0x876f7bff,
- 0x000003ff, 0xb9ef4803,
- 0x876f7bff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0xb96e2a05,
- 0x806e816e, 0xbf0d9972,
- 0xbf850002, 0x8f6e896e,
- 0xbf820001, 0x8f6e8a6e,
+ 0xb9eef815, 0xbefc036f,
+ 0xbefe0370, 0xbeff0371,
+ 0x876f7bff, 0x000003ff,
+ 0xb9ef4803, 0x876f7bff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0xb96e2a05, 0x806e816e,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f6e896e, 0xbf820001,
+ 0x8f6e8a6e, 0xb96f1e06,
+ 0x8f6f8a6f, 0x806e6f6e,
0x806eff6e, 0x00000200,
0x806e746e, 0x826f8075,
0x876fff6f, 0x0000ffff,
@@ -2463,3 +2494,440 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
+static const uint32_t cwsr_trap_gfx11_hex[] = {
+ 0xbfa00001, 0xbfa0021b,
+ 0xb0804006, 0xb8f8f802,
+ 0x91788678, 0xb8fbf803,
+ 0x8b6eff78, 0x00002000,
+ 0xbfa10009, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa2001e,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20041, 0xbf830010,
+ 0xb8fbf803, 0xbfa0fffa,
+ 0x8b6eff7b, 0x00000900,
+ 0xbfa20015, 0x8b6eff7b,
+ 0x000071ff, 0xbfa10008,
+ 0x8b6fff7b, 0x00007080,
+ 0xbfa10001, 0xbeee1287,
+ 0xb8eff801, 0x846e8c6e,
+ 0x8b6e6f6e, 0xbfa2000a,
+ 0x8b6eff6d, 0x00ff0000,
+ 0xbfa20007, 0xb8eef801,
+ 0x8b6eff6e, 0x00000800,
+ 0xbfa20003, 0x8b6eff7b,
+ 0x00000400, 0xbfa20026,
+ 0xbefa4d82, 0xbf89fc07,
+ 0x84fa887a, 0xf4005bbd,
+ 0xf8000010, 0xbf89fc07,
+ 0x846e976e, 0x9177ff77,
+ 0x00800000, 0x8c776e77,
+ 0xf4045bbd, 0xf8000000,
+ 0xbf89fc07, 0xf4045ebd,
+ 0xf8000008, 0xbf89fc07,
+ 0x8bee6e6e, 0xbfa10001,
+ 0xbe80486e, 0x8b6eff6d,
+ 0x01ff0000, 0xbfa20005,
+ 0x8c78ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+ 0xbfa00005, 0x8b6eff6d,
+ 0x01000000, 0xbfa20002,
+ 0x806c846c, 0x826d806d,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb978f802, 0xbe804a6c,
+ 0x8b6dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbefe4d84,
+ 0xbf89fc07, 0x8b7aff7f,
+ 0x04000000, 0x847a857a,
+ 0x8c6d7a6d, 0xbefa007e,
+ 0x8b7bff7f, 0x0000ffff,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xdca6c000, 0x007a0000,
+ 0x7e000280, 0xbefe007a,
+ 0xbeff007b, 0xb8fb02dc,
+ 0x847b997b, 0xb8fa3b05,
+ 0x807a817a, 0xbf0d997b,
+ 0xbfa20002, 0x847a897a,
+ 0xbfa00001, 0x847a8a7a,
+ 0xb8fb1e06, 0x847b8a7b,
+ 0x807a7b7a, 0x8b7bff7f,
+ 0x0000ffff, 0x807aff7a,
+ 0x00000200, 0x807a7e7a,
+ 0x827b807b, 0xd7610000,
+ 0x00010870, 0xd7610000,
+ 0x00010a71, 0xd7610000,
+ 0x00010c72, 0xd7610000,
+ 0x00010e73, 0xd7610000,
+ 0x00011074, 0xd7610000,
+ 0x00011275, 0xd7610000,
+ 0x00011476, 0xd7610000,
+ 0x00011677, 0xd7610000,
+ 0x00011a79, 0xd7610000,
+ 0x00011c7e, 0xd7610000,
+ 0x00011e7f, 0xbefe00ff,
+ 0x00003fff, 0xbeff0080,
+ 0xdca6c040, 0x007a0000,
+ 0xd760007a, 0x00011d00,
+ 0xd760007b, 0x00011f00,
+ 0xbefe007a, 0xbeff007b,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007d, 0xbef00080,
+ 0xb8f302dc, 0x84739973,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00002, 0xbeff00c1,
+ 0xbfa00009, 0xbef600ff,
+ 0x01000000, 0xe0685080,
+ 0x701d0100, 0xe0685100,
+ 0x701d0200, 0xe0685180,
+ 0x701d0300, 0xbfa00008,
+ 0xbef600ff, 0x01000000,
+ 0xe0685100, 0x701d0100,
+ 0xe0685200, 0x701d0200,
+ 0xe0685300, 0x701d0300,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
+ 0xbef600ff, 0x01000000,
+ 0x7e000280, 0x7e020280,
+ 0x7e040280, 0xbefd0080,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6c, 0x807d817d,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6e, 0x807d817d,
+ 0xd7610002, 0x0000fa6f,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa78, 0x807d817d,
+ 0xb8faf803, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa7b,
+ 0x807d817d, 0xb8f1f801,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xb8f1f814,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xb8f1f815,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xbefe00ff,
+ 0x0000ffff, 0xbeff0080,
+ 0xe0685000, 0x701d0200,
+ 0xbefe00c1, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0xbef600ff,
+ 0x01000000, 0xbef90080,
+ 0xbefd0080, 0xbf800000,
+ 0xbe804100, 0xbe824102,
+ 0xbe844104, 0xbe864106,
+ 0xbe884108, 0xbe8a410a,
+ 0xbe8c410c, 0xbe8e410e,
+ 0xd7610002, 0x0000f200,
+ 0x80798179, 0xd7610002,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
+ 0x80798179, 0xd7610002,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
+ 0x80798179, 0xd7610002,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
+ 0x80798179, 0xd7610002,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
+ 0x80798179, 0xd7610002,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
+ 0x80798179, 0xd7610002,
+ 0x0000f20b, 0x80798179,
+ 0xd7610002, 0x0000f20c,
+ 0x80798179, 0xd7610002,
+ 0x0000f20d, 0x80798179,
+ 0xd7610002, 0x0000f20e,
+ 0x80798179, 0xd7610002,
+ 0x0000f20f, 0x80798179,
+ 0xbf06a079, 0xbfa10006,
+ 0xe0685000, 0x701d0200,
+ 0x8070ff70, 0x00000080,
+ 0xbef90080, 0x7e040280,
+ 0x807d907d, 0xbf0aff7d,
+ 0x00000060, 0xbfa2ffbc,
+ 0xbe804100, 0xbe824102,
+ 0xbe844104, 0xbe864106,
+ 0xbe884108, 0xbe8a410a,
+ 0xd7610002, 0x0000f200,
+ 0x80798179, 0xd7610002,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
+ 0x80798179, 0xd7610002,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
+ 0x80798179, 0xd7610002,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
+ 0x80798179, 0xd7610002,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
+ 0x80798179, 0xd7610002,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
+ 0x80798179, 0xd7610002,
+ 0x0000f20b, 0x80798179,
+ 0xe0685000, 0x701d0200,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0xbfbd0000,
+ 0x8b7aff6d, 0x80000000,
+ 0xbfa10040, 0x847b867b,
+ 0x847b827b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20012,
+ 0xbe8300ff, 0x00000080,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf890000,
+ 0xe0685000, 0x701d0100,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff4, 0xbfa00011,
+ 0xbe8300ff, 0x00000100,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf890000,
+ 0xe0685000, 0x701d0100,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff4, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20017,
+ 0xbef600ff, 0x01000000,
+ 0xbefd0084, 0xbf0a7b7d,
+ 0xbfa10037, 0x7e008700,
+ 0x7e028701, 0x7e048702,
+ 0x7e068703, 0xe0685000,
+ 0x701d0000, 0xe0685080,
+ 0x701d0100, 0xe0685100,
+ 0x701d0200, 0xe0685180,
+ 0x701d0300, 0x807d847d,
+ 0x8070ff70, 0x00000200,
+ 0xbf0a7b7d, 0xbfa2ffef,
+ 0xbfa00025, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10011,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685100, 0x701d0100,
+ 0xe0685200, 0x701d0200,
+ 0xe0685300, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000c,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xe0685000,
+ 0x701d0000, 0x807d817d,
+ 0x8070ff70, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff8,
+ 0xbfa00141, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xb8f202dc,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003a,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa1002f, 0x846f866f,
+ 0x846f826f, 0xbef6006f,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
+ 0x8078ff78, 0x00000200,
+ 0x8078ff78, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa2000c, 0xe0500000,
+ 0x781d0000, 0xbf8903f7,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff5,
+ 0xbfa0000b, 0xe0500000,
+ 0x781d0000, 0xbf8903f7,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000100,
+ 0x8078ff78, 0x00000100,
+ 0xbf0a6f7d, 0xbfa2fff5,
+ 0xbef80080, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef3b05,
+ 0x806f816f, 0x846f826f,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20024,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000200, 0xbefd0084,
+ 0xbf0a6f7d, 0xbfa10050,
+ 0xe0505000, 0x781d0000,
+ 0xe0505080, 0x781d0100,
+ 0xe0505100, 0x781d0200,
+ 0xe0505180, 0x781d0300,
+ 0xbf8903f7, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807d847d,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7d, 0xbfa2ffee,
+ 0xe0505000, 0x6e1d0000,
+ 0xe0505080, 0x6e1d0100,
+ 0xe0505100, 0x6e1d0200,
+ 0xe0505180, 0x6e1d0300,
+ 0xbf8903f7, 0xbfa00034,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefd0084,
+ 0xbf0a6f7d, 0xbfa10012,
+ 0xe0505000, 0x781d0000,
+ 0xe0505100, 0x781d0100,
+ 0xe0505200, 0x781d0200,
+ 0xe0505300, 0x781d0300,
+ 0xbf8903f7, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807d847d,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7d, 0xbfa2ffee,
+ 0xb8ef1e06, 0x8b6fc16f,
+ 0xbfa1000e, 0x846f836f,
+ 0x806f7d6f, 0xbefe00c1,
+ 0xbeff0080, 0xe0505000,
+ 0x781d0000, 0xbf8903f7,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff7,
+ 0xbeff00c1, 0xe0505000,
+ 0x6e1d0000, 0xe0505100,
+ 0x6e1d0100, 0xe0505200,
+ 0x6e1d0200, 0xe0505300,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef600ff, 0x01000000,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf428403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf42c403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf430403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xbfbd0000, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0xf4205bfa,
+ 0xf0000000, 0x80788478,
+ 0xf4205b3a, 0xf0000000,
+ 0x80788478, 0xf4205b7a,
+ 0xf0000000, 0x80788478,
+ 0xf4205c3a, 0xf0000000,
+ 0x80788478, 0xf4205c7a,
+ 0xf0000000, 0x80788478,
+ 0xf4205eba, 0xf0000000,
+ 0x80788478, 0xf4205efa,
+ 0xf0000000, 0x80788478,
+ 0xf4205e7a, 0xf0000000,
+ 0x80788478, 0xf4205cfa,
+ 0xf0000000, 0x80788478,
+ 0xf4205bba, 0xf0000000,
+ 0x80788478, 0xbf89fc07,
+ 0xb96ef814, 0xf4205bba,
+ 0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef815,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0x8b6f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x8b6f7bff, 0xfffff800,
+ 0x856f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4085c37,
+ 0xf8000050, 0xf4085d37,
+ 0xf8000060, 0xf4005e77,
+ 0xf8000074, 0xbf89fc07,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af802, 0xbe804a6c,
+ 0xbfb00000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
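
As a reading aid for the restructured first-level trap entry that the .asm diffs below implement (and that the hex blobs above already encode), here is a rough C model of the decision flow. Only the mask values and the ordering of the checks come from the patch; the enum, the function and its name are invented for illustration, and the SINGLE_STEP_MISSED_WORKAROUND branch is omitted.

#include <stdint.h>

#define SQ_WAVE_STATUS_HALT_MASK		0x2000u
#define SQ_WAVE_TRAPSTS_SAVECTX_MASK		0x400u
#define SQ_WAVE_TRAPSTS_MEM_VIOL_MASK		0x100u
#define SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK	0x800u
#define SQ_WAVE_TRAPSTS_EXCP_MASK		0x1FFu
#define SQ_WAVE_TRAPSTS_EXCP_HI_MASK		0x7000u
#define SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK		0x80u
#define SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT	7
#define SQ_WAVE_MODE_EXCP_EN_SHIFT		12
#define S_SAVE_PC_HI_TRAP_ID_MASK		0x00FF0000u

enum trap_action { ACT_SAVE, ACT_FETCH_2ND_TRAP, ACT_SPIN_WAIT };

/* status/trapsts/mode/pc_hi are the SQ_WAVE_* register images read on entry. */
static enum trap_action first_level_trap_decision(uint32_t status,
						  uint32_t trapsts,
						  uint32_t mode,
						  uint32_t pc_hi)
{
	uint32_t excp;

	if (status & SQ_WAVE_STATUS_HALT_MASK) {
		/* L_HALTED: a host trap may arrive while the wave is halted. */
		if (pc_hi & S_SAVE_PC_HI_TRAP_ID_MASK)
			return ACT_FETCH_2ND_TRAP;
		if (trapsts & SQ_WAVE_TRAPSTS_SAVECTX_MASK)
			return ACT_SAVE;
		/* Instruction-fetch fault: sleep, re-read TRAPSTS, retry. */
		return ACT_SPIN_WAIT;
	}

	/* L_NOT_HALTED: non-maskable exceptions always reach the 2nd level. */
	if (trapsts & (SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |
		       SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK))
		return ACT_FETCH_2ND_TRAP;

	/* Maskable exceptions only if their MODE.EXCP_EN bit is set. */
	excp = trapsts & (SQ_WAVE_TRAPSTS_EXCP_MASK |
			  SQ_WAVE_TRAPSTS_EXCP_HI_MASK);
	if (excp) {
		/* Fold addr_watch1..3 (excp_hi) into excp_en.addr_watch. */
		if (trapsts & (SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK |
			       SQ_WAVE_TRAPSTS_EXCP_HI_MASK))
			excp |= 1u << SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT;
		if ((excp << SQ_WAVE_MODE_EXCP_EN_SHIFT) & mode)
			return ACT_FETCH_2ND_TRAP;
	}

	/* trap_id != 0: s_trap or host trap. */
	if (pc_hi & S_SAVE_PC_HI_TRAP_ID_MASK)
		return ACT_FETCH_2ND_TRAP;

	return (trapsts & SQ_WAVE_TRAPSTS_SAVECTX_MASK) ?
		ACT_SAVE : ACT_FETCH_2ND_TRAP;
}
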
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 5081f91190b8..250ab007399b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -23,37 +23,52 @@
/* To compile this assembly code:
*
* Navi1x:
- * cpp -DASIC_TARGET_NAVI1X=1 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
- * sp3-nv1x nv1x.sp3 -hex nv1x.hex
+ * cpp -DASIC_FAMILY=CHIP_NAVI10 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
+ * sp3 nv1x.sp3 -hex nv1x.hex
*
- * Others:
- * cpp -DASIC_TARGET_NAVI1X=0 cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
- * sp3-gfx10 gfx10.sp3 -hex gfx10.hex
+ * gfx10:
+ * cpp -DASIC_FAMILY=CHIP_SIENNA_CICHLID cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
+ * sp3 gfx10.sp3 -hex gfx10.hex
+ *
+ * gfx11:
+ * cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
+ * sp3 gfx11.sp3 -hex gfx11.hex
*/
-#define NO_SQC_STORE !ASIC_TARGET_NAVI1X
+#define CHIP_NAVI10 26
+#define CHIP_SIENNA_CICHLID 30
+#define CHIP_PLUM_BONITO 36
+
+#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
+#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
+#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
+#if ASIC_FAMILY < CHIP_PLUM_BONITO
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+#else
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+#endif
+
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF
+var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
@@ -63,46 +78,37 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
+
+var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15
var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT = 25
-var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE = 1
var SQ_WAVE_IB_STS_REPLAY_W64H_MASK = 0x02000000
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1
-var SQ_WAVE_IB_STS_RCNT_SIZE = 6
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 24
var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0x7F000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
+var S_SAVE_PC_HI_HT_MASK = 0x01000000
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_MASK = 0xFC000000
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 25
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000
-var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24
-var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
var s_sgpr_save_num = 108
@@ -130,19 +136,10 @@ var s_save_ttmps_hi = s_save_trapsts
var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
var S_WAVE_SIZE = 25
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
var s_restore_mem_offset = ttmp12
@@ -179,84 +176,133 @@ L_JUMP_TO_RESTORE:
L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK
-if SINGLE_STEP_MISSED_WORKAROUND
- // No single step exceptions if MODE.DEBUG_EN=0.
- s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
- s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
+
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- // Second-level trap already handled exception if STATUS.HALT=1.
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_CHECK_TRAP_ID
+
+ s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_bitset1_b32 ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_MODE)
+ s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ s_and_b32 ttmp2, ttmp2, ttmp3
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
- s_cbranch_scc0 L_FETCH_2ND_TRAP
-
-L_NO_SINGLE_STEP_WORKAROUND:
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
end
-
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
s_cbranch_scc1 L_SAVE
- // If STATUS.MEM_VIOL is asserted then halt the wave to prevent
- // the exception raising again and blocking context save.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
- s_cbranch_scc0 L_FETCH_2ND_TRAP
- s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
-
L_FETCH_2ND_TRAP:
-
-#if ASIC_TARGET_NAVI1X
- // Preserve and clear scalar XNACK state before issuing scalar loads.
- // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
- // unused space ttmp11[31:24].
- s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
- s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
- s_or_b32 ttmp11, ttmp11, ttmp3
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_or_b32 ttmp11, ttmp11, ttmp3
- s_andn2_b32 ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+ save_and_clear_ib_sts(ttmp14, ttmp15)
#endif
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
// ttmp12 holds SQ_WAVE_STATUS
+#if HAVE_SENDMSG_RTN
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_waitcnt lgkmcnt(0)
+#else
s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+#endif
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
+ s_waitcnt lgkmcnt(0)
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
+
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
L_NO_NEXT_TRAP:
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
- s_addc_u32 ttmp1, ttmp1, 0
-L_EXCP_CASE:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+ s_cbranch_scc1 L_TRAP_CASE
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Host trap will not cause trap re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
s_and_b32 ttmp1, ttmp1, 0xFFFF
-#if ASIC_TARGET_NAVI1X
- // Restore SQ_WAVE_IB_STS.
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
- s_or_b32 ttmp2, ttmp2, ttmp3
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+ restore_ib_sts(ttmp14, ttmp15)
#endif
// Restore SQ_WAVE_STATUS.
@@ -271,20 +317,8 @@ L_SAVE:
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
-#if ASIC_TARGET_NAVI1X
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+#if HAVE_XNACK
+ save_and_clear_ib_sts(s_save_tmp, s_save_trapsts)
#endif
/* inform SPI the readiness and wait for SPI's go signal */
@@ -292,9 +326,13 @@ L_SAVE:
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+#if HAVE_SENDMSG_RTN
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+#else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+#endif
-#if ASIC_TARGET_NAVI1X
+#if ASIC_FAMILY < CHIP_SIENNA_CICHLID
L_SLEEP:
// sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause
// SQ hang, since the 7,8th wave could not get arbit to exec inst, while
@@ -305,16 +343,57 @@ L_SLEEP:
s_waitcnt lgkmcnt(0)
#endif
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+#if NO_SQC_STORE
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] slc:1 glc:1
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+#endif
+
// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
get_wave_size(s_save_ttmps_hi)
get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
-#if ASIC_TARGET_NAVI1X
+#if NO_SQC_STORE
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] inst_offset:0x40 slc:1 glc:1
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
+#else
s_store_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
@@ -326,12 +405,6 @@ L_SLEEP:
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited
s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
s_mov_b32 s_save_m0, m0
@@ -339,7 +412,7 @@ L_SLEEP:
s_mov_b32 s_save_mem_offset, 0x0
get_wave_size(s_wave_size)
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
// Save and clear vector XNACK state late to free up SGPRs.
s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
s_setreg_imm32_b32 hwreg(HW_REG_SHADER_XNACK_MASK), 0x0
@@ -361,7 +434,9 @@ L_SAVE_4VGPR_WAVE32:
// VGPR Allocated in 4-GPR granularity
+#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
@@ -372,7 +447,9 @@ L_SAVE_4VGPR_WAVE64:
// VGPR Allocated in 4-GPR granularity
+#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
@@ -397,7 +474,8 @@ L_SAVE_HWREG:
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
- write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)
@@ -418,9 +496,13 @@ L_SAVE_HWREG:
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
#if NO_SQC_STORE
- // Write HWREG/SGPRs with 32 VGPR lanes, wave32 is common case.
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
s_mov_b32 exec_hi, 0x0
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
#endif
/* save SGPRs */
@@ -506,7 +588,7 @@ L_SAVE_LDS_NORMAL:
s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
s_barrier //LDS is used? wait for other waves in the same TG
- s_and_b32 s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
// first wave do LDS save;
@@ -628,7 +710,7 @@ L_SAVE_VGPR_WAVE64:
// VGPR store using dw burst
s_mov_b32 m0, 0x4 //VGPR initial index value =4
s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_END
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
L_SAVE_VGPR_W64_LOOP:
v_movrels_b32 v0, v0 //v0 = v[0+m0]
@@ -646,6 +728,7 @@ L_SAVE_VGPR_W64_LOOP:
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+L_SAVE_SHARED_VGPR:
//Below part will be the save shared vgpr part (new for gfx10)
s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
@@ -674,12 +757,7 @@ L_RESTORE:
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+
//determine it is wave32 or wave64
get_wave_size(s_restore_size)
@@ -722,7 +800,13 @@ L_RESTORE_LDS_NORMAL:
s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
L_RESTORE_LDS_LOOP_W32:
+#if HAVE_BUFFER_LDS_LOAD
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_waitcnt vmcnt(0)
+ ds_store_addtid_b32 v0
+#endif
s_add_u32 m0, m0, 128 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -730,7 +814,13 @@ L_RESTORE_LDS_LOOP_W32:
s_branch L_RESTORE_VGPR
L_RESTORE_LDS_LOOP_W64:
+#if HAVE_BUFFER_LDS_LOAD
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_waitcnt vmcnt(0)
+ ds_store_addtid_b32 v0
+#endif
s_add_u32 m0, m0, 256 // 256 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -765,6 +855,8 @@ L_RESTORE_VGPR_NORMAL:
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
L_RESTORE_VGPR_WAVE32_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -786,6 +878,7 @@ L_RESTORE_VGPR_WAVE32_LOOP:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
+ s_waitcnt vmcnt(0)
s_branch L_RESTORE_SGPR
@@ -796,6 +889,8 @@ L_RESTORE_VGPR_WAVE64:
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
L_RESTORE_VGPR_WAVE64_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -812,6 +907,7 @@ L_RESTORE_VGPR_WAVE64_LOOP:
s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+L_RESTORE_SHARED_VGPR:
//Below part will be the restore shared vgpr part (new for gfx10)
s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
@@ -935,7 +1031,7 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
#endif
@@ -945,8 +1041,10 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
@@ -956,24 +1054,8 @@ L_RESTORE_HWREG:
s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
s_waitcnt lgkmcnt(0)
-#if ASIC_TARGET_NAVI1X
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_REPLAY_W64H_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
-
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+#if HAVE_XNACK
+ restore_ib_sts(s_restore_tmp, s_restore_m0)
#endif
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
@@ -1089,5 +1171,29 @@ end
function get_wave_size(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
- s_or_b32 s_reg, s_save_spi_init_hi, s_reg //share with exec_hi, it's at bit25
+end
+
+function save_and_clear_ib_sts(tmp1, tmp2)
+ // Preserve and clear scalar XNACK state before issuing scalar loads.
+ // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
+ // unused space ttmp11[31:24].
+ s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
+ s_getreg_b32 tmp1, hwreg(HW_REG_IB_STS)
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_lshl_b32 tmp2, tmp2, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_or_b32 ttmp11, ttmp11, tmp2
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 tmp2, tmp2, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_or_b32 ttmp11, ttmp11, tmp2
+ s_andn2_b32 tmp1, tmp1, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp1
+end
+
+function restore_ib_sts(tmp1, tmp2)
+ s_lshr_b32 tmp1, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshr_b32 tmp1, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_and_b32 tmp1, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_or_b32 tmp1, tmp1, tmp2
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp1
end
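
The save_and_clear_ib_sts()/restore_ib_sts() helpers added above park IB_STS.REPLAY_W64H and RCNT/FIRST_REPLAY in the unused ttmp11[31:24] bits while scalar loads are issued, then rebuild the IB_STS image on exit. Below is a minimal C sketch of that round trip, using the gfx10 masks and shift distances defined earlier in this file; the C function names and signatures are invented for illustration and are not part of the patch.

#include <stdint.h>

#define SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT	15
#define SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT	25
#define SQ_WAVE_IB_STS_REPLAY_W64H_MASK		0x02000000u
#define SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK	0x003F8000u
#define TTMP11_SAVE_REPLAY_W64H_SHIFT		31
#define TTMP11_SAVE_REPLAY_W64H_MASK		0x80000000u
#define TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT	24
#define TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK	0x7F000000u

/* Stash REPLAY_W64H and RCNT/FIRST_REPLAY in ttmp11[31:24]; the handler
 * then clears both fields in HW_REG_IB_STS itself. */
static uint32_t pack_ib_sts_into_ttmp11(uint32_t ib_sts, uint32_t ttmp11)
{
	ttmp11 &= ~(TTMP11_SAVE_REPLAY_W64H_MASK |
		    TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK);
	ttmp11 |= (ib_sts & SQ_WAVE_IB_STS_REPLAY_W64H_MASK) <<
		  (TTMP11_SAVE_REPLAY_W64H_SHIFT -
		   SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT);
	ttmp11 |= (ib_sts & SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK) <<
		  (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT -
		   SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT);
	return ttmp11;
}

/* Reverse the shifts to rebuild the IB_STS image before returning from the trap. */
static uint32_t unpack_ib_sts_from_ttmp11(uint32_t ttmp11)
{
	uint32_t rcnt_first_replay, replay_w64h;

	rcnt_first_replay = (ttmp11 >> (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT -
					SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)) &
			    SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK;
	replay_w64h = (ttmp11 >> (TTMP11_SAVE_REPLAY_W64H_SHIFT -
				  SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)) &
		      SQ_WAVE_IB_STS_REPLAY_W64H_MASK;

	return replay_w64h | rcnt_first_replay;
}
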
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index eed78a04e7c7..6770cbe3250a 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -46,8 +46,6 @@ var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN
/**************************************************************************/
/* variables */
/**************************************************************************/
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
@@ -56,6 +54,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
+var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -72,8 +71,10 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
#endif
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
+var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
@@ -83,37 +84,30 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
+
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
/* Save */
var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
+var S_SAVE_PC_HI_HT_MASK = 0x01000000
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_SHIFT = 27 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK = 0xF8000000 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 26 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x04000000 //FIXME
-
var s_save_spi_init_lo = exec_lo
var s_save_spi_init_hi = exec_hi
@@ -140,18 +134,9 @@ var s_save_ttmps_hi = s_save_trapsts //no conflict
var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
@@ -199,71 +184,77 @@ L_JUMP_TO_RESTORE:
L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
-if SINGLE_STEP_MISSED_WORKAROUND
- // No single step exceptions if MODE.DEBUG_EN=0.
- s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
- s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
- // Second-level trap already handled exception if STATUS.HALT=1.
- s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- // Prioritize single step exception over context save.
- // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
- s_cbranch_scc0 L_FETCH_2ND_TRAP
+ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
-L_NO_SINGLE_STEP_WORKAROUND:
-end
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+L_CHECK_SAVE:
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
s_cbranch_scc1 L_SAVE //this is the operation for save
- // ********* Handle non-CWSR traps *******************
-
- // Illegal instruction is a non-maskable exception which blocks context save.
- // Halt the wavefront and return from the trap.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
- s_cbranch_scc1 L_HALT_WAVE
-
- // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
- // Instead, halt the wavefront and return from the trap.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
- s_cbranch_scc0 L_FETCH_2ND_TRAP
-
-L_HALT_WAVE:
- // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
- // We cannot prevent further faults. Spin wait until context saved.
- s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
- s_cbranch_scc0 L_NOT_ALREADY_HALTED
-
-L_WAIT_CTX_SAVE:
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
s_sleep 0x10
- s_getreg_b32 ttmp2, hwreg(HW_REG_TRAPSTS)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_TRAPSTS_SAVECTX_MASK
- s_cbranch_scc0 L_WAIT_CTX_SAVE
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_CHECK_TRAP_ID
+
+ s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_bitset1_b32 ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_MODE)
+ s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ s_and_b32 ttmp2, ttmp2, ttmp3
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
-L_NOT_ALREADY_HALTED:
- s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+end
- // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
- // Rewind the PC to prevent this from occurring. The debugger compensates for this.
- s_sub_u32 ttmp0, ttmp0, 0x8
- s_subb_u32 ttmp1, ttmp1, 0x0
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ s_cbranch_scc1 L_SAVE
L_FETCH_2ND_TRAP:
// Preserve and clear scalar XNACK state before issuing scalar reads.
- // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
- s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
- s_or_b32 ttmp11, ttmp11, ttmp3
-
- s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+ save_and_clear_ib_sts(ttmp14)
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
@@ -271,27 +262,48 @@ L_FETCH_2ND_TRAP:
s_getreg_b32 ttmp14, hwreg(HW_REG_SQ_SHADER_TMA_LO)
s_getreg_b32 ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
+ s_waitcnt lgkmcnt(0)
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
+
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
L_NO_NEXT_TRAP:
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
- s_addc_u32 ttmp1, ttmp1, 0
-L_EXCP_CASE:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+ s_cbranch_scc1 L_TRAP_CASE
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Host trap will not cause trap re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- // Restore SQ_WAVE_IB_STS.
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+ restore_ib_sts(ttmp14)
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -312,16 +324,7 @@ L_SAVE:
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+ save_and_clear_ib_sts(s_save_tmp)
/* inform SPI the readiness and wait for SPI's go signal */
s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
@@ -360,12 +363,6 @@ L_SAVE:
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
 s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
//FIXME right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi (might need to save them before using them?)
s_mov_b32 s_save_m0, m0 //save M0
@@ -690,12 +687,6 @@ L_RESTORE:
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
/* global mem offset */
// s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
@@ -889,19 +880,7 @@ L_RESTORE:
s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
s_waitcnt lgkmcnt(0)
- //reuse s_restore_m0 as a temp register
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+ restore_ib_sts(s_restore_tmp)
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -910,8 +889,7 @@ L_RESTORE:
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
- s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
/**************************************************************************/
@@ -1078,3 +1056,19 @@ function set_status_without_spi_prio(status, tmp)
s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
end
+
+function save_and_clear_ib_sts(tmp)
+ // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+ s_getreg_b32 tmp, hwreg(HW_REG_IB_STS)
+ s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 tmp, tmp, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
+ s_or_b32 ttmp11, ttmp11, tmp
+ s_setreg_imm32_b32 hwreg(HW_REG_IB_STS), 0x0
+end
+
+function restore_ib_sts(tmp)
+ s_lshr_b32 tmp, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp
+end
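
Note: the new save_and_clear_ib_sts()/restore_ib_sts() helpers move IB_STS.FIRST_REPLAY (bit 15) and IB_STS.RCNT (bits 20:16) as one 6-bit field into ttmp11[31:26] and back, then clear IB_STS. A minimal user-space sketch of that round trip follows; the shift and mask constants are illustrative stand-ins for the SQ_WAVE_IB_STS_* / TTMP11_SAVE_* definitions, not copied from the headers.

/* Standalone sketch of the ttmp11 packing used by save_and_clear_ib_sts()
 * and restore_ib_sts().  Constants mirror the comments in the handler and
 * are illustrative only. */
#include <assert.h>
#include <stdint.h>

#define IB_STS_FIRST_REPLAY_SHIFT      15
#define IB_STS_RCNT_FIRST_REPLAY_MASK  (0x3Fu << IB_STS_FIRST_REPLAY_SHIFT) /* bits 20:15 */
#define TTMP11_SAVE_SHIFT              26
#define TTMP11_SAVE_MASK               (0x3Fu << TTMP11_SAVE_SHIFT)         /* bits 31:26 */

static uint32_t save_ib_sts(uint32_t ib_sts, uint32_t ttmp11)
{
	uint32_t field = ib_sts & IB_STS_RCNT_FIRST_REPLAY_MASK;

	field <<= (TTMP11_SAVE_SHIFT - IB_STS_FIRST_REPLAY_SHIFT);
	return (ttmp11 & ~TTMP11_SAVE_MASK) | field;
}

static uint32_t restore_ib_sts(uint32_t ttmp11)
{
	uint32_t field = ttmp11 >> (TTMP11_SAVE_SHIFT - IB_STS_FIRST_REPLAY_SHIFT);

	return field & IB_STS_RCNT_FIRST_REPLAY_MASK;
}

int main(void)
{
	uint32_t ib_sts = (0x15u << 16) | (1u << 15);	/* RCNT=0x15, FIRST_REPLAY=1 */
	uint32_t ttmp11 = save_ib_sts(ib_sts, 0);

	assert(restore_ib_sts(ttmp11) == ib_sts);	/* round trip is lossless */
	return 0;
}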
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f1a225a20719..8667e3df2d0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -441,10 +441,14 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
- } else {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx10_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
+ } else {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx11_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
}
kfd->cwsr_enabled = true;
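
kfd_cwsr_init() now has a third branch: GC IP 11.x parts take cwsr_trap_gfx11_hex, earlier GFX10 parts on this path keep cwsr_trap_gfx10_hex, and each branch retains the BUILD_BUG_ON() that the blob fits in one page, since the CWSR ISA is exposed to the process as a single page. A rough user-space sketch of the same guard and selection, assuming placeholder blob sizes and a bare gc_major version number (pick_cwsr_isa is hypothetical, not part of the patch):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/* Placeholder blobs standing in for the generated cwsr_trap_*_hex arrays. */
static const unsigned char cwsr_trap_gfx10_hex[2048];
static const unsigned char cwsr_trap_gfx11_hex[2560];

static_assert(sizeof(cwsr_trap_gfx10_hex) <= PAGE_SIZE, "gfx10 CWSR ISA too large");
static_assert(sizeof(cwsr_trap_gfx11_hex) <= PAGE_SIZE, "gfx11 CWSR ISA too large");

struct cwsr_isa {
	const unsigned char *isa;
	size_t size;
};

struct cwsr_isa pick_cwsr_isa(unsigned int gc_major)
{
	struct cwsr_isa out;

	if (gc_major < 11) {
		out.isa = cwsr_trap_gfx10_hex;
		out.size = sizeof(cwsr_trap_gfx10_hex);
	} else {
		out.isa = cwsr_trap_gfx11_hex;
		out.size = sizeof(cwsr_trap_gfx11_hex);
	}
	return out;
}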
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 29e9ebf6d8d5..2ebf0132c25b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -531,7 +531,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
- bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
+ bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 2e20f54bb147..8d50d207cf66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1271,6 +1271,12 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
if (!peer_dev)
continue;
+ /* Include the CPU peer in GPU hive if connected over xGMI. */
+ if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
+ dev->node_props.hive_id &&
+ dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ peer_dev->node_props.hive_id = dev->node_props.hive_id;
+
list_for_each_entry(inbound_link, &peer_dev->io_link_props,
list) {
if (inbound_link->node_to != link->node_from)
@@ -1302,22 +1308,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
- /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
- if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
- struct kfd_topology_device *top_dev;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list) {
- if (top_dev->gpu)
- break;
-
- top_dev->node_props.hive_id = gpu->hive_id;
- }
-
- up_read(&topology_lock);
- }
-
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
* else create a Virtual CRAT for this gpu device and then parse that
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a92cfb055c15..70be67a56673 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -769,7 +769,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
DRM_ERROR("DM: notify type %d invalid!", notify.type);
continue;
}
@@ -5381,17 +5381,19 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
- bool *per_pixel_alpha, bool *global_alpha,
- int *global_alpha_value)
+ bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+ bool *global_alpha, int *global_alpha_value)
{
*per_pixel_alpha = false;
+ *pre_multiplied_alpha = true;
*global_alpha = false;
*global_alpha_value = 0xff;
if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
return;
- if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
+ plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
static const uint32_t alpha_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
@@ -5406,6 +5408,9 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
break;
}
}
+
+ if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+ *pre_multiplied_alpha = false;
}
if (plane_state->alpha < 0xffff) {
@@ -5568,7 +5573,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
return ret;
fill_blending_from_plane_state(
- plane_state, &plane_info->per_pixel_alpha,
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
return 0;
@@ -5615,6 +5620,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->tiling_info = plane_info.tiling_info;
dc_plane_state->visible = plane_info.visible;
dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
@@ -7911,7 +7917,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
plane_cap && plane_cap->per_pixel_alpha) {
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
- BIT(DRM_MODE_BLEND_PREMULTI);
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane, blend_caps);
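
With DRM_MODE_BLEND_COVERAGE now advertised on overlay planes, fill_blending_from_plane_state() treats both PREMULTI and COVERAGE as per-pixel alpha and only COVERAGE clears pre_multiplied_alpha. A standalone sketch of that mapping, with a local enum standing in for the DRM pixel_blend_mode values:

#include <stdbool.h>

enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

struct blending {
	bool per_pixel_alpha;
	bool pre_multiplied_alpha;
};

struct blending map_blend_mode(enum blend_mode mode, bool format_has_alpha)
{
	struct blending b = { .per_pixel_alpha = false, .pre_multiplied_alpha = true };

	/* both modes use the per-pixel alpha channel if the format has one */
	if (format_has_alpha && (mode == BLEND_PREMULTI || mode == BLEND_COVERAGE))
		b.per_pixel_alpha = true;

	/* only coverage means straight (non-premultiplied) alpha */
	if (b.per_pixel_alpha && mode == BLEND_COVERAGE)
		b.pre_multiplied_alpha = false;

	return b;
}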
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 02943ca65807..cf1b5f354ae9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -122,7 +122,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 27501b735a9c..a2ade6e93f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -91,7 +91,8 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 3121dd2d2a91..fc3af81ed6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -122,7 +122,8 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e41a48f596a3..f14449401188 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2901,14 +2901,15 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, true);
- else
+ } else {
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
// Stream updates
if (stream_update)
@@ -2924,10 +2925,11 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -3052,10 +3054,11 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67ef357e5798..a789ea8af27f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "gpio_service_interface.h"
#include "core_status.h"
#include "dc_link_dp.h"
+#include "dc_link_dpia.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
#include "opp.h"
@@ -240,7 +241,7 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !link->hpd_status)
+ if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -1604,8 +1605,25 @@ static bool dc_link_construct_legacy(struct dc_link *link,
if (link->hpd_gpio) {
if (!link->dc->config.allow_edp_hotplug_detection)
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case 1: // only the 1st eDP handles hotplug
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case 2: // only the 2nd eDP handles hotplug
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ break;
+ }
}
break;
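
allow_edp_hotplug_detection becomes an integer (see the dc.h hunk below): 1 routes eDP hotplug to link_index 0, 2 to link_index 1, and 0 keeps hotplug disabled on eDP. The switch above can be read as the small index comparison sketched here (edp_link_handles_hotplug is a hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: value N (1 or 2) means the Nth eDP link keeps its HPD RX
 * interrupt source; every other link has eDP hotplug disabled. */
bool edp_link_handles_hotplug(uint32_t allow_edp_hotplug_detection,
			      uint32_t link_index)
{
	if (allow_edp_hotplug_detection == 0)
		return false;

	/* value 1 -> link_index 0, value 2 -> link_index 1 */
	return link_index == allow_edp_hotplug_detection - 1;
}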
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 340b5f90a82a..dc30ac366a50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2783,31 +2783,37 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings current_setting = *link_setting;
+ struct dc_link_settings cur_link_settings = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
dp_trace_commit_lt_init(link);
- if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+ if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
* pattern in SST mode will be sent right after the link training
*/
link_hwss->setup_stream_encoder(pipe_ctx);
dp_trace_set_lt_start_timestamp(link, false);
- for (j = 0; j < attempts; ++j) {
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_enable_link_phy(
link,
&pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
- &current_setting);
+ &cur_link_settings);
if (stream->sink_patches.dppowerup_delay > 0) {
int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -2832,30 +2838,30 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
}
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
return true;
}
@@ -2866,8 +2872,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
break;
- DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
@@ -2876,27 +2883,49 @@ bool perform_link_training_with_retries(
enum dc_connection_type type = dc_connection_none;
dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
- } else if (do_fallback) {
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(link, *link_setting, &current_setting, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
+ decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
+ /* Flag if the reduced link bandwidth no longer meets the stream requirements,
+ * or we have fallen back to the minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &current_setting);
- if (req_bw > link_bw)
- break;
+ link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, req_bw, link_bw);
}
msleep(delay_between_attempts);
-
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
-
return false;
}
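
perform_link_training_with_retries() now separates retrying at the original settings from falling back to lower link bandwidth: the attempt counter advances when fallback is disabled, the previous attempt aborted, training succeeded but the link can no longer carry the stream (is_link_bw_low), or the link is already at RBR x1 (is_link_bw_min); otherwise only the settings are reduced, and fail_count < attempts * 10 bounds the loop. A control-flow sketch with training, bandwidth and fallback stubbed out (the abort case and the inter-attempt delay are omitted for brevity):

/* Sketch of the retry/fallback policy; all helpers are stubs. */
#include <stdbool.h>
#include <stdio.h>

struct link_settings { int rate; int lanes; };

static bool train(struct link_settings s)                     { return s.rate >= 2; } /* stub */
static bool bw_too_low(struct link_settings s)                { return s.rate < 2; }  /* stub */
static bool at_minimum(struct link_settings s)                { return s.rate <= 1 && s.lanes <= 1; }
static struct link_settings fall_back(struct link_settings s) { s.rate--; return s; } /* stub */

static bool train_with_retries(struct link_settings wanted, int attempts, bool do_fallback)
{
	struct link_settings cur = wanted;
	int j = 0, fail_count = 0;
	bool low = false;

	while (j < attempts && fail_count < attempts * 10) {
		bool ok = train(cur);

		if (ok && !low)
			return true;
		fail_count++;

		if (!do_fallback || (ok && low) || at_minimum(cur)) {
			/* retry from the top at the original settings */
			j++;
			cur = wanted;
			low = false;
		} else {
			/* reduce bandwidth; does not consume an attempt */
			cur = fall_back(cur);
			low = bw_too_low(cur);
		}
	}
	return false;
}

int main(void)
{
	printf("%d\n", train_with_retries((struct link_settings){3, 4}, 4, true));
	return 0;
}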
@@ -5097,13 +5126,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-void dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+ uint8_t lttpr_dpcd_data[8];
bool allow_lttpr_non_transparent_mode = 0;
+ bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
- memset(link->lttpr_dpcd_data, '\0', sizeof(link->lttpr_dpcd_data));
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
@@ -5113,116 +5145,82 @@ void dp_retrieve_lttpr_cap(struct dc_link *link)
allow_lttpr_non_transparent_mode = 1;
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- link->lttpr_support = LTTPR_UNSUPPORTED;
-
/*
- * Logic to determine LTTPR support
+ * Logic to determine LTTPR mode
*/
- if (vbios_lttpr_interop)
- link->lttpr_support = LTTPR_SUPPORTED;
- else if (link->dc->config.allow_lttpr_non_transparent_mode.raw == 0
- || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_support = LTTPR_UNSUPPORTED;
- else
- link->lttpr_support = LTTPR_CHECK_EXT_SUPPORT;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (vbios_lttpr_enable && vbios_lttpr_interop)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+ if (allow_lttpr_non_transparent_mode)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else
+ link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+ } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_support = LTTPR_UNSUPPORTED;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
- if (link->lttpr_support > LTTPR_UNSUPPORTED) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
* LTTPR extended aux timeout if LTTPR is present.
*/
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- link->lttpr_dpcd_data,
- sizeof(link->lttpr_dpcd_data));
- }
-}
-
-bool dp_parse_lttpr_mode(struct dc_link *link)
-{
- bool dpcd_allow_lttpr_non_transparent_mode = false;
- bool is_lttpr_present = false;
-
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
-
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- dpcd_allow_lttpr_non_transparent_mode = true;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- dpcd_allow_lttpr_non_transparent_mode = true;
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ if (is_lttpr_present) {
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ configure_lttpr_mode_transparent(link);
+ } else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
-
- /*
- * Logic to determine LTTPR mode
- */
- if (link->lttpr_support == LTTPR_SUPPORTED)
- if (vbios_lttpr_enable)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (dpcd_allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- else // lttpr_support == LTTPR_CHECK_EXT_SUPPORT
- if (dpcd_allow_lttpr_non_transparent_mode) {
- link->lttpr_support = LTTPR_SUPPORTED;
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- } else {
- link->lttpr_support = LTTPR_UNSUPPORTED;
- }
-
- if (link->lttpr_support == LTTPR_UNSUPPORTED)
- return false;
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- link->lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- link->lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- link->lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- link->lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, link->lttpr_dpcd_data, sizeof(link->lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-
return is_lttpr_present;
}
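
The LTTPR capabilities are now read into a local 8-byte buffer and each field is indexed as "DPCD address minus the base of the block". A sketch of that addressing pattern with placeholder register addresses (the real DPCD map is not reproduced here):

/* Sketch of the "index = DPCD address - block base" pattern used when the
 * LTTPR capability block is read as a single burst. */
#include <stdint.h>
#include <string.h>

#define BLOCK_BASE     0xF0000u   /* first register of the capability block (placeholder) */
#define REG_REVISION   0xF0000u
#define REG_MAX_LANES  0xF0004u
#define BLOCK_SIZE     8

struct caps { uint8_t revision; uint8_t max_lanes; };

/* stand-in for core_link_read_dpcd(): fills buf with BLOCK_SIZE bytes */
static void read_dpcd_block(uint32_t base, uint8_t *buf, size_t len)
{
	memset(buf, 0, len);
	buf[REG_REVISION - base] = 0x14;
	buf[REG_MAX_LANES - base] = 4;
}

struct caps parse_caps(void)
{
	uint8_t raw[BLOCK_SIZE];
	struct caps c;

	read_dpcd_block(BLOCK_BASE, raw, sizeof(raw));
	c.revision  = raw[REG_REVISION - BLOCK_BASE];
	c.max_lanes = raw[REG_MAX_LANES - BLOCK_BASE];
	return c;
}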
@@ -5374,8 +5372,7 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
- dp_retrieve_lttpr_cap(link);
-
+ is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5411,9 +5408,6 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (link->lttpr_support > LTTPR_UNSUPPORTED)
- is_lttpr_present = dp_parse_lttpr_mode(link);
-
if (!is_lttpr_present)
dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index a5765f36d86f..1b7a8774b0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
#include "inc/link_dpcd.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER \
link->ctx->logger
@@ -69,6 +70,24 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ return is_hpd_high;
+}
+
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
* Abort link training if sink unplug detected.
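
dc_link_dpia_query_hpd_status() asks DMUB for the live HPD level of a DPIA link, and dc_link_detect_sink() now uses it instead of the cached hpd_status for endpoints without a physical HPD pin. A caller-side sketch of that decision, with the DMUB query reduced to a function pointer:

#include <stdbool.h>

enum connection_type { CONNECTION_NONE, CONNECTION_SINGLE };

struct link_state {
	bool has_physical_hpd_pin;
	bool hpd_pending;
	bool (*query_hpd_high)(void);	/* stands in for the DMUB QUERY_HPD_STATE command */
};

enum connection_type detect_sink(const struct link_state *link)
{
	if (!link->has_physical_hpd_pin) {
		if (link->hpd_pending || !link->query_hpd_high())
			return CONNECTION_NONE;
		return CONNECTION_SINGLE;
	}
	/* with a physical pin the HPD GPIO would be read instead (not shown) */
	return CONNECTION_SINGLE;
}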
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6b9c6a71841..5bc6ff2fa73e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -61,6 +61,8 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->blend_tf->type = TF_TYPE_BYPASS;
}
+ plane_state->pre_multiplied_alpha = true;
+
}
static void dc_plane_destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 26c24db8f1da..3960c74482be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.185"
+#define DC_VER "3.2.186"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -329,7 +329,7 @@ struct dc_config {
bool disable_dmcu;
bool enable_4to1MPC;
bool enable_windowed_mpo_odm;
- bool allow_edp_hotplug_detection;
+ uint32_t allow_edp_hotplug_detection;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -1011,6 +1011,7 @@ struct dc_plane_state {
bool is_tiling_rotated;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool visible;
@@ -1045,6 +1046,7 @@ struct dc_plane_info {
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 251f2bbc96b9..a3c37ee3f849 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -129,8 +129,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- uint8_t lttpr_dpcd_data[8];
- enum lttpr_support lttpr_support;
enum lttpr_mode lttpr_mode;
bool is_internal_display;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 29e20d92b0bb..9e39cd7b203e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -87,7 +87,8 @@ static void release_engine(
engine->ddc = NULL;
- REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1,
+ AUX_SW_USE_AUX_REG_REQ, 0);
}
#define SW_CAN_ACCESS_AUX 1
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5e6fea85a7b5..845aa8a1027d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1101,9 +1101,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
- *pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
- modulo_hz);
+ if (modulo_hz)
+ *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ modulo_hz);
+ else
+ *pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be
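
get_pixel_clk_frequency_100hz() now guards the division by MODULO, so a zero readback reports 0 kHz instead of faulting. A minimal sketch of the guard (hypothetical helper, simplified units):

#include <stdint.h>

/* Sketch: a zero MODULO readback yields 0 kHz rather than dividing by zero. */
uint32_t pixel_clk_khz(uint64_t clock_hz, uint32_t dprefclk_khz, uint32_t modulo_hz)
{
	if (!modulo_hz)
		return 0;
	return (uint32_t)(clock_hz * dprefclk_khz * 10 / modulo_hz);
}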
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e02ac75afbf7..e3a62873c0e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2550,12 +2550,21 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && pipe_ctx->plane_state->pre_multiplied_alpha);
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2564,14 +2573,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
else
blnd_cfg.global_alpha = 0xff;
- /* DCN1.0 has output CM before MPC which seems to screw with
- * pre-multiplied alpha.
- */
- blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
- pipe_ctx->stream->output_color_space)
- && per_pixel_alpha;
-
-
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
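
dcn10_update_mpcc() now takes pre_multiplied_alpha from the plane state, keeps the DCN1.0-only requirement that the output color space be RGB, and still picks PER_PIXEL_ALPHA vs PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN based on whether global alpha is also enabled. A sketch of that selection with local stand-ins for the MPCC enums:

#include <stdbool.h>

enum alpha_mode { GLOBAL_ALPHA, PER_PIXEL_ALPHA, PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN };

struct blnd_cfg {
	enum alpha_mode mode;
	bool pre_multiplied_alpha;
	int global_gain;
};

struct blnd_cfg pick_blend(bool per_pixel_alpha, bool pre_multiplied_alpha,
			   bool global_alpha, int global_alpha_value,
			   bool output_is_rgb)
{
	struct blnd_cfg cfg = { .mode = GLOBAL_ALPHA, .pre_multiplied_alpha = false,
				.global_gain = 0xff };

	if (!per_pixel_alpha)
		return cfg;

	/* the output_is_rgb restriction models the DCN1.0-only colorspace check */
	cfg.pre_multiplied_alpha = pre_multiplied_alpha && output_is_rgb;
	if (global_alpha) {
		cfg.mode = PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
		cfg.global_gain = global_alpha_value;
	} else {
		cfg.mode = PER_PIXEL_ALPHA;
	}
	return cfg;
}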
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1f87bd72e4a..ec6aa8d8b251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1773,7 +1773,6 @@ void dcn20_post_unlock_program_front_end(
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
@@ -2346,12 +2345,16 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2365,7 +2368,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
- blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
if (pipe_ctx->plane_state->format
== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
blnd_cfg.pre_multiplied_alpha = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index f5e8916601d3..b604fb26f288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#define REG(reg)\
optc1->tg_regs->reg
@@ -184,6 +186,14 @@ void optc3_set_dsc_config(struct timing_generator *optc,
}
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+ DC_FP_START();
+ optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
+ DC_FP_END();
+}
+
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 336b2ce6a636..1c1a67c4cec1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -84,6 +84,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
@@ -91,137 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_0_ip = {
- .use_min_dcfclk = 0,
- .clamp_min_dcfclk = 0,
- .odm_capable = 1,
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
- .gpuvm_max_page_table_levels = 4,
- .hostvm_max_page_table_levels = 4,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 6,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 84,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .max_page_table_levels = 2,
- .pte_chunk_size_kbytes = 2, // ?
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 0, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 6,
- .max_num_dpp = 6,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 562.0,
- .dppclk_mhz = 300.0,
- .phyclk_mhz = 300.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 405.6,
- },
- },
- .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
- .num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 40.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 404,
- .dummy_pstate_latency_us = 5,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3650,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .do_urgent_latency_adjustment = true,
- .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
enum dcn30_clk_src_array_id {
DCN30_CLK_SRC_PLL0,
DCN30_CLK_SRC_PLL1,
@@ -1480,90 +1350,9 @@ int dcn30_populate_dml_pipes_from_context(
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
- int pipe_cnt, i, j;
- double max_calc_writeback_dispclk;
- double writeback_dispclk;
- struct writeback_st dout_wb;
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
-
- if (!stream)
- continue;
- max_calc_writeback_dispclk = 0;
-
- /* Set writeback information */
- pipes[pipe_cnt].dout.wb_enable = 0;
- pipes[pipe_cnt].dout.num_active_wb = 0;
- for (j = 0; j < stream->num_wb_info; j++) {
- struct dc_writeback_info *wb_info = &stream->writeback_info[j];
-
- if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
- (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
- pipes[pipe_cnt].dout.wb_enable = 1;
- pipes[pipe_cnt].dout.num_active_wb++;
- dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_height :
- wb_info->dwb_params.cnv_params.src_height;
- dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_width :
- wb_info->dwb_params.cnv_params.src_width;
- dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
- dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
-
- /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
- if (dc->dml.ip.writeback_max_hscl_taps > 1) {
- dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
- dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
- } else {
- dout_wb.wb_htaps_luma = 1;
- dout_wb.wb_vtaps_luma = 1;
- }
- dout_wb.wb_htaps_chroma = 0;
- dout_wb.wb_vtaps_chroma = 0;
- dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_width /
- (double)wb_info->dwb_params.dest_width :
- (double)wb_info->dwb_params.cnv_params.src_width /
- (double)wb_info->dwb_params.dest_width;
- dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_height /
- (double)wb_info->dwb_params.dest_height :
- (double)wb_info->dwb_params.cnv_params.src_height /
- (double)wb_info->dwb_params.dest_height;
- if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
- wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
- dout_wb.wb_pixel_format = dm_444_64;
- else
- dout_wb.wb_pixel_format = dm_444_32;
-
- /* Workaround for cases where multiple writebacks are connected to same plane
- * In which case, need to compute worst case and set the associated writeback parameters
- * This workaround is necessary due to DML computation assuming only 1 set of writeback
- * parameters per pipe
- */
- writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
- dout_wb.wb_pixel_format,
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
- dout_wb.wb_hratio,
- dout_wb.wb_vratio,
- dout_wb.wb_htaps_luma,
- dout_wb.wb_vtaps_luma,
- dout_wb.wb_src_width,
- dout_wb.wb_dst_width,
- pipes[pipe_cnt].pipe.dest.htotal,
- dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
-
- if (writeback_dispclk > max_calc_writeback_dispclk) {
- max_calc_writeback_dispclk = writeback_dispclk;
- pipes[pipe_cnt].dout.wb = dout_wb;
- }
- }
- }
-
- pipe_cnt++;
- }
-
+ DC_FP_START();
+ dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+ DC_FP_END();
}
unsigned int dcn30_calc_max_scaled_time(
@@ -1598,7 +1387,7 @@ void dcn30_set_mcif_arb_params(
enum mmhubbub_wbif_mode wbif_mode;
struct display_mode_lib *dml = &context->bw_ctx.dml;
struct mcif_arb_params *wb_arb_params;
- int i, j, k, dwb_pipe;
+ int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
@@ -1622,17 +1411,15 @@ void dcn30_set_mcif_arb_params(
else
wbif_mode = PACKED_444;
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- }
+ DC_FP_START();
+ dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
+ DC_FP_END();
wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
- wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
dwb_pipe++;
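
The DML writeback and MCIF arbitration math moves into dcn30_fpu.c and the resource code keeps only thin wrappers, so every floating-point path is bracketed by DC_FP_START()/DC_FP_END() (which wrap kernel_fpu_begin()/kernel_fpu_end() on x86); that is also why the old noinline comment below is dropped. A user-space sketch of the bracketing pattern, with the macros reduced to a depth counter:

/* Sketch of the DC_FP_START()/DC_FP_END() pattern: the caller stays FP-free
 * and only the *_fpu helper touches floating point, so the begin/end pair
 * always encloses every FP instruction. */
#include <stdio.h>

static int fp_depth;

#define DC_FP_START()	(fp_depth++)
#define DC_FP_END()	(fp_depth--)

static double compute_writeback_dispclk_fpu(double hratio, double vratio)
{
	return 100.0 * hratio * vratio;	/* FP math lives only in the _fpu helper */
}

static void populate_writeback(double hratio, double vratio, double *out)
{
	DC_FP_START();
	*out = compute_writeback_dispclk_fpu(hratio, vratio);
	DC_FP_END();
}

int main(void)
{
	double clk;

	populate_writeback(1.5, 2.0, &clk);
	printf("%.1f MHz, fp_depth=%d\n", clk, fp_depth);
	return 0;
}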
@@ -2111,178 +1898,11 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
- int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- /* Set B:
- * DCFCLK: 1GHz or min required above 1GHz
- * FCLK/UCLK: Max
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set D:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
- */
- /*
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- */
-
- /* Set C:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * pstate latency overridden to 5us
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- unsigned int min_dram_speed_mts_margin = 160;
-
- if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
-
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (!pstate_en) {
- /* The only difference between A and C is p-state latency, if p-state is not supported we want to
- * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- /* Make set D = set A until set D is enabled */
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_update_soc_for_wm_a(dc, context);
DC_FP_END();
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- }
}
void dcn30_calculate_wm_and_dlg(
@@ -2292,7 +1912,7 @@ void dcn30_calculate_wm_and_dlg(
int vlevel)
{
DC_FP_START();
- dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
@@ -2351,40 +1971,6 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
-{
- double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
- bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
- bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
- bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
- if (optimal_fclk)
- *optimal_fclk = bw_from_dram /
- (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
- if (optimal_dcfclk)
- *optimal_dcfclk = bw_from_dram /
- (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
@@ -2399,47 +1985,43 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
+ struct dc_bounding_box_max_clk dcn30_bb_max_clk;
+
+ memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
+
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ DC_FP_START();
+ dcn30_fpu_update_dram_channel_width_bytes(dc);
+ DC_FP_END();
if (bw_params->clk_table.entries[0].memclk_mhz) {
- int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
+ dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
+ dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
+ dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
+ dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
- if (!max_dcfclk_mhz)
- max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
- if (!max_dispclk_mhz)
- max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
- if (!max_dppclk_mhz)
- max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
- if (!max_phyclk_mhz)
- max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+ DC_FP_START();
+ dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
+ DC_FP_END();
- if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
num_dcfclk_sta_targets++;
- } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
- dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
break;
}
}
@@ -2452,7 +2034,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2479,7 +2061,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2494,33 +2076,15 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_0_soc.num_states = num_states;
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state = i;
- dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
- /* Fill all states with max values of all other clocks */
- dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
- dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
- dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
- /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
- dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
- dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
- }
- /* re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ DC_FP_START();
+ dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
+ DC_FP_END();
}
}
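Editorial note: the common thread in the dcn30_resource.c hunks above is that the resource code keeps only integer bookkeeping and defers every access to the double-typed dcn3_0_soc/dcn3_0_ip tables to the new dcn30_fpu_*() helpers, bracketed by DC_FP_START()/DC_FP_END(). A minimal sketch of the resulting caller shape, using the helper names introduced by this patch (the surrounding scan is condensed for illustration and is not the literal patched code):

	struct dc_bounding_box_max_clk max_clk = {0};
	int i;

	/* integer-only scan of the clock table stays outside the FP region */
	for (i = 0; i < MAX_NUM_DPM_LVL; i++)
		if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk.max_dcfclk_mhz)
			max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;

	/* anything touching the double-typed SoC bounding box runs between the FP markers */
	DC_FP_START();
	dcn30_fpu_update_max_clk(&max_clk);
	dcn30_fpu_update_bw_bounding_box(dc, bw_params, &max_clk, dcfclk_mhz, dram_speed_mts);
	DC_FP_END();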
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b92e4cc0232f..3330a1026fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -35,6 +35,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
+
struct dcn30_resource_pool {
struct resource_pool base;
};
@@ -96,4 +99,6 @@ enum dc_status dcn30_add_stream_to_ctx(
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
+
#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4daf8931aa7c..a5df74110284 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -81,6 +81,8 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dml/dcn30/display_mode_vba_30.h"
#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index f0938653bb88..f537888f4fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -43,6 +43,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dce/dce_abm.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4fcbc0502808..76f863eb86ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -25,6 +25,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dc_link_ddc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index ccf1b71a8269..3d9f07d4770b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -36,6 +36,8 @@
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index ee911452c048..a64b88ca01a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -113,7 +114,7 @@ DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
-DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
DML += dcn31/dcn31_fpu.o
DML += dcn301/dcn301_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
new file mode 100644
index 000000000000..574676a0711a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "dcn_calc_math.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+
+#include "display_mode_vba_30.h"
+#include "dcn30_fpu.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+ .use_min_dcfclk = 0,
+ .clamp_min_dcfclk = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 6,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 2,
+ .pte_chunk_size_kbytes = 2, // ?
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 0, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 0,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 6,
+ .max_num_dpp = 6,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 1,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 562.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 300.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 405.6,
+ },
+ },
+
+ .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ .num_states = 1,
+ .sr_exit_time_us = 15.5,
+ .sr_enter_plus_exit_time_us = 20,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404,
+ .dummy_pstate_latency_us = 5,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3650,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .do_urgent_latency_adjustment = true,
+ .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ double vtotal_min, vtotal_max;
+ double ratio, modulo, phase;
+ uint32_t vblank_start;
+ uint32_t v_total_mask_value = 0;
+
+ dc_assert_fp_enabled();
+
+ /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
+	 * VTOTAL_MAX - VTOTAL_MIN = 1
+ */
+ v_total_mask_value = 16;
+ vtotal_min = dcn_bw_floor(vtotal_avg);
+ vtotal_max = dcn_bw_ceil(vtotal_avg);
+
+ /* Check that bottom VBLANK is at least 2 lines tall when running with
+ * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
+ * of lines in a frame - 1'.
+ */
+ REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
+ &vblank_start);
+ ASSERT(vtotal_min >= vblank_start + 1);
+
+ /* Special case where the average frame rate can be achieved
+ * without using the DTO
+ */
+ if (vtotal_min == vtotal_max) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+
+ ratio = vtotal_max - vtotal_avg;
+ modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
+ phase = ratio * modulo;
+
+ /* Special cases where the DTO phase gets rounded to 0 or
+ * to DTO modulo
+ */
+ if (phase <= 0 || phase >= modulo) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
+ phase <= 0 ?
+ (uint32_t)vtotal_max : (uint32_t)vtotal_min);
+ REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
+ REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+ REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
+ OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
+ OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+ optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
+}
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ int pipe_cnt, i, j;
+ double max_calc_writeback_dispclk;
+ double writeback_dispclk;
+ struct writeback_st dout_wb;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+ if (!stream)
+ continue;
+ max_calc_writeback_dispclk = 0;
+
+ /* Set writeback information */
+ pipes[pipe_cnt].dout.wb_enable = 0;
+ pipes[pipe_cnt].dout.num_active_wb = 0;
+ for (j = 0; j < stream->num_wb_info; j++) {
+ struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+ if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+ (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+ pipes[pipe_cnt].dout.wb_enable = 1;
+ pipes[pipe_cnt].dout.num_active_wb++;
+ dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+ dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+ dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+ /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+ if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+ dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+ } else {
+ dout_wb.wb_htaps_luma = 1;
+ dout_wb.wb_vtaps_luma = 1;
+ }
+ dout_wb.wb_htaps_chroma = 0;
+ dout_wb.wb_vtaps_chroma = 0;
+ dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ dout_wb.wb_pixel_format = dm_444_64;
+ else
+ dout_wb.wb_pixel_format = dm_444_32;
+
+				/* Workaround for cases where multiple writebacks are connected to the same plane,
+				 * in which case we need to compute the worst case and set the associated writeback
+				 * parameters. This workaround is necessary because the DML computation assumes only
+				 * one set of writeback parameters per pipe.
+ */
+ writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+ dout_wb.wb_pixel_format,
+ pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+ dout_wb.wb_hratio,
+ dout_wb.wb_vratio,
+ dout_wb.wb_htaps_luma,
+ dout_wb.wb_vtaps_luma,
+ dout_wb.wb_src_width,
+ dout_wb.wb_dst_width,
+ pipes[pipe_cnt].pipe.dest.htotal,
+ dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+ if (writeback_dispclk > max_calc_writeback_dispclk) {
+ max_calc_writeback_dispclk = writeback_dispclk;
+ pipes[pipe_cnt].dout.wb = dout_wb;
+ }
+ }
+ }
+
+ pipe_cnt++;
+ }
+}
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe)
+{
+ int i;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0; i < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); i++) {
+ wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ }
+
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+}
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+	dc_assert_fp_enabled();
+
+	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+
+	dc_assert_fp_enabled();
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ /* Set B:
+ * DCFCLK: 1GHz or min required above 1GHz
+ * FCLK/UCLK: Max
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set D:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+ */
+ /*
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
+
+ /* Set C:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * pstate latency overridden to 5us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+		/* Find the largest table entry that is lower than the dram speed; anything lower than DPM0 still uses DPM0. */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (!pstate_en) {
+		/* The only difference between A and C is p-state latency; if p-state is not supported,
+		 * we want to calculate DLG based on dummy p-state latency and max out the set A p-state watermark
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ /* Make set D = set A until set D is enabled */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ DC_FP_START();
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+}
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+}
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
+{
+ dc_assert_fp_enabled();
+
+ if (!dcn30_bb_max_clk->max_dcfclk_mhz)
+ dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+ if (!dcn30_bb_max_clk->max_dispclk_mhz)
+ dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+ if (!dcn30_bb_max_clk->max_dppclk_mhz)
+ dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+ if (!dcn30_bb_max_clk->max_phyclk_mhz)
+ dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+}
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ dc_assert_fp_enabled();
+
+ bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts)
+{
+ unsigned int i;
+
+ dc_assert_fp_enabled();
+
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state = i;
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = dcn30_bb_max_clk->max_dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = dcn30_bb_max_clk->max_phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+ dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+}
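As a sanity check on dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk() above, here is a rough worked example of its arithmetic. num_chans and dram_channel_width_bytes are not part of the static dcn3_0_soc initializer in this file (they come from VBIOS vram_info at runtime), so the values 8 and 2 below are assumptions chosen only for illustration; the percentages and byte widths are the table defaults:

	/* assumed inputs: uclk = 16000 MT/s, 8 channels x 2 bytes (illustrative only) */
	double bw1 = 16000.0 * 8 * 2 * (40.0 / 100);	/* dram-limited BW: 102400 MB/s */
	double bw2 = 16000.0 * 8 * 2 * (60.0 / 100);	/* sdp-limited BW:  153600 MB/s */
	double bw  = (bw1 < bw2) ? bw1 : bw2;		/* min of the two:  102400 MB/s */
	unsigned int fclk   = bw / (64 * (60.0 / 100));	/* fabric return path, ~2666 MHz */
	unsigned int dcfclk = bw / (64 * (60.0 / 100));	/* 64-byte return bus, ~2666 MHz */

With both divisor byte widths equal to 64 in this table, the two results coincide; the caller in dcn30_update_bw_bounding_box() then clamps the computed dcfclk against the entries in bw_params->clk_table.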
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
new file mode 100644
index 000000000000..dedfe7b5f173
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_FPU_H__
+#define __DCN30_FPU_H__
+
+#include "core_types.h"
+#include "dcn20/dcn20_optc.h"
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg);
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe);
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc);
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk);
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk);
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts);
+
+
+#endif /* __DCN30_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 26f3a55c35d7..555d4d9e1454 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -486,4 +486,11 @@ struct dc_state {
} perf_params;
};
+struct dc_bounding_box_max_clk {
+ int max_dcfclk_mhz;
+ int max_dispclk_mhz;
+ int max_dppclk_mhz;
+ int max_phyclk_mhz;
+};
+
#endif /* _CORE_TYPES_H_ */
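struct dc_bounding_box_max_clk is kept integer-only on purpose: the non-FPU resource code can collect the clock-table maxima and hand them across the DC_FP_START()/DC_FP_END() boundary, where dcn30_fpu_update_max_clk() backfills any zero field from dcn3_0_soc.clock_limits[0]. A short hedged sketch of that fallback behaviour (the literal numbers are illustrative, not taken from the patch):

	struct dc_bounding_box_max_clk max_clk;

	memset(&max_clk, 0, sizeof(max_clk));
	max_clk.max_dcfclk_mhz = 1200;	/* e.g. found while scanning bw_params->clk_table */
	/* dispclk/dppclk/phyclk left at 0: no valid entries were found in the table */

	DC_FP_START();
	/* zeroed fields are filled from dcn3_0_soc.clock_limits[0] inside the helper */
	dcn30_fpu_update_max_clk(&max_clk);
	DC_FP_END();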
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 851b98299063..a3c1e9c56d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -217,8 +217,7 @@ void disable_dp_hpo_output(struct dc_link *link,
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_apply_lttpr_mode(struct dc_link *link);
+bool dp_retrieve_lttpr_cap(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 74dafd0f9d3d..39c1d1d07357 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -87,6 +87,11 @@ union dpia_set_config_data {
*/
enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
 * DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 9f465b4d626e..447a56286dd0 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -80,12 +80,6 @@ enum link_training_result {
DP_128b_132b_CDS_DONE_TIMEOUT,
};
-enum lttpr_support {
- LTTPR_UNSUPPORTED,
- LTTPR_CHECK_EXT_SUPPORT,
- LTTPR_SUPPORTED,
-};
-
enum lttpr_mode {
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
index c755f43aaaf8..7a2c6b12c249 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
@@ -6070,6 +6070,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
index 14a3bacfcfd1..fa1f4374fafe 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
@@ -6058,6 +6058,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
index 106094ed0661..39f6fde6db1d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
@@ -7142,6 +7142,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
index bcd190a3fcdd..c5f4afac3b39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
@@ -37285,12 +37285,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
index 9b6825b74cc1..23580907663b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
@@ -5584,6 +5584,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index e7c0cad41081..a788ff3b68c0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -30357,12 +30357,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index dc8ce7aaa0cf..c70f7ba94d8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -39439,12 +39439,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
index 91969554e36a..ca1e1eb39256 100755
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
@@ -16956,7 +16956,7 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
@@ -16964,7 +16964,7 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
index 2f780aefc722..6104ae304099 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -35487,12 +35487,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5472f9936feb..d1bf073adf54 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -770,6 +770,9 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
+ if (!pp_funcs)
+ return AMD_DPM_FORCED_LEVEL_AUTO;
+
mutex_lock(&adev->pm.mutex);
if (pp_funcs->get_performance_level)
level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 6016b325b6b5..a601024ba4de 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1436,6 +1436,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
+ case IP_VERSION(13, 0, 0):
return 0;
default:
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index ecc6411dfc8d..c1f76236da26 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -671,8 +671,8 @@ typedef struct {
uint16_t reserved[2];
//Frequency changes
- uint16_t GfxclkFmin; // MHz
- uint16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -683,15 +683,14 @@ typedef struct {
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
uint32_t Spare[13];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
@@ -719,15 +718,14 @@ typedef struct {
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
uint32_t Spare[13];
@@ -997,7 +995,8 @@ typedef struct {
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
- uint32_t SpareVmin[12];
+ QuadraticInt_t Vmin_droop;
+ uint32_t SpareVmin[9];
//SECTION: DPM Configuration 1
@@ -1286,7 +1285,6 @@ typedef struct {
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
-
// SECTION: Board Reserved
uint32_t BoardSpare[64];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 2b44d41a5157..afa1991e26f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x27
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x28
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x28
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d68be8f8850e..78f3d9e722bb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -697,12 +697,28 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900))
- use_metrics_v3 = true;
- else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300))
- use_metrics_v2 = true;
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ if (smu->smc_fw_version >= 0x3A4900)
+ use_metrics_v3 = true;
+ else if (smu->smc_fw_version >= 0x3A4300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 11):
+ if (smu->smc_fw_version >= 0x412D00)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 12):
+ if (smu->smc_fw_version >= 0x3B2300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 13):
+ if (smu->smc_fw_version >= 0x491100)
+ use_metrics_v2 = true;
+ break;
+ default:
+ break;
+ }
ret = smu_cmn_get_metrics_table(smu,
NULL,
@@ -3833,13 +3849,28 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900))
- use_metrics_v3 = true;
- else if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300))
- use_metrics_v2 = true;
-
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ if (smu->smc_fw_version >= 0x3A4900)
+ use_metrics_v3 = true;
+ else if (smu->smc_fw_version >= 0x3A4300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 11):
+ if (smu->smc_fw_version >= 0x412D00)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 12):
+ if (smu->smc_fw_version >= 0x3B2300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 13):
+ if (smu->smc_fw_version >= 0x491100)
+ use_metrics_v2 = true;
+ break;
+ default:
+ break;
+ }
ret = smu_cmn_get_metrics_table(smu,
&metrics_external,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b87f550af26b..5f8809f6990d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -781,7 +781,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
}
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
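The hunk above (and an identical change in smu_v13_0.c later in this patch) replaces bitmap_copy() through a cast pointer with bitmap_to_arr32(). A brief, hedged illustration of the difference: feature_mask is an array of two u32 words feeding the SetAllowedFeaturesMask messages (the High half is visible in the hunk), and casting it to unsigned long * only happens to produce the expected low/high split on 64-bit little-endian hosts, while bitmap_to_arr32() performs the conversion portably. The plain-C sketch below is standalone and illustrative only; mask64_to_arr32() and the sample value are not kernel code.

/* Standalone sketch of the low/high u32 split that bitmap_to_arr32()
 * performs portably for a 64-bit feature mask. */
#include <stdint.h>
#include <stdio.h>

static void mask64_to_arr32(uint32_t out[2], uint64_t mask)
{
	out[0] = (uint32_t)(mask & 0xffffffffu);	/* low word  -> ...MaskLow  */
	out[1] = (uint32_t)(mask >> 32);		/* high word -> ...MaskHigh */
}

int main(void)
{
	uint32_t feature_mask[2];

	mask64_to_arr32(feature_mask, 0x0000000300000001ULL);
	printf("low=0x%08x high=0x%08x\n", feature_mask[0], feature_mask[1]);
	return 0;
}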
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 38af648cb857..fb130409309c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1666,6 +1666,7 @@ static const struct throttling_logging_label {
uint32_t feature_mask;
const char *label;
} logging_label[] = {
+ {(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ae6321af9d88..ef9b56de143b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -218,13 +218,25 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
pptable_id == 3688)
pptable_id = 36881;
/*
- * Temporary solution for SMU V13.0.0:
- * - use 99991 signed pptable when SCPM enabled
- * TODO: drop this when the pptable carried in vbios
- * is ready.
+ * Temporary solution for SMU V13.0.0 with SCPM enabled:
+ * - use 36831 signed pptable when pp_table_id is 3683
+ * - use 36641 signed pptable when pp_table_id is 3664 or 0
+ * TODO: drop these when the pptable carried in vbios is ready.
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
- pptable_id = 99991;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 0:
+ case 3664:
+ pptable_id = 36641;
+ break;
+ case 3683:
+ pptable_id = 36831;
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -448,13 +460,24 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
pptable_id = smu->smu_table.boot_values.pp_table_id;
/*
- * Temporary solution for SMU V13.0.0:
- * - use 9999 unsigned pptable when SCPM disabled
- * TODO: drop this when the pptable carried in vbios
- * is ready.
+ * Temporary solution for SMU V13.0.0 with SCPM disabled:
+ * - use 3664 or 3683 on request
+ * - use 3664 when pptable_id is 0
+ * TODO: drop these when the pptable carried in vbios is ready.
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
- pptable_id = 9999;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 0:
+ pptable_id = 3664;
+ break;
+ case 3664:
+ case 3683:
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
}
/* force using vbios pptable in sriov mode */
@@ -814,7 +837,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
feature->feature_num < 64)
return -EINVAL;
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 197a0e2ff063..7432b3e76d3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -275,9 +275,7 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
-#if 0
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-#endif
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
@@ -296,6 +294,12 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 7d6ff141b43f..5a17b51aa0f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -644,42 +644,40 @@ static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu,
if (!table || !clock_ranges)
return -EINVAL;
- if (clock_ranges) {
- if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
- return -EINVAL;
-
- for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
- table->WatermarkRow[WM_DCFCLK][i].MinClock =
- clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MaxClock =
- clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MinMclk =
- clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
- clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
-
- table->WatermarkRow[WM_DCFCLK][i].WmSetting =
- clock_ranges->reader_wm_sets[i].wm_inst;
- }
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ return -EINVAL;
- for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MinMclk =
- clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
- clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
-
- table->WatermarkRow[WM_SOCCLK][i].WmSetting =
- clock_ranges->writer_wm_sets[i].wm_inst;
- }
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
+ }
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
/* pass data to smu controller */
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 87257b1b028f..feff4f8c927c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -190,6 +190,9 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index e957d4851dc0..f024dc93939e 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -69,7 +69,7 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+ defined(__mips__) || defined(__loongarch__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3e3b09588fd3..958b37123bf1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1047,7 +1047,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
GEM_BUG_ON(!pmu->base.event_init);
/* Select the first online CPU as a designated reader. */
- if (!cpumask_weight(&i915_pmu_cpumask))
+ if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
return 0;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9c8829f945b2..f7863d6dea80 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 52516eb20cb8..3a462e327e0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -541,7 +541,6 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
@@ -559,7 +558,6 @@ static int dpu_encoder_virt_atomic_check(
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
if (IS_ERR(global_state))
@@ -1814,7 +1812,6 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- dsc_common_mode = 0;
pic_width = dsc->drm->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4829d1ce0cf8..59da348ff339 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -574,11 +574,11 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
*/
static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
{
- DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
-
if (!phys_enc)
return;
+ DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
kfree(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index bce47647d891..e23e2552e802 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -49,8 +49,6 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
-#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
-
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -1305,15 +1303,9 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
- int i;
ddev = dpu_kms->dev;
- WARN_ON(!(dpu_kms->num_paths));
- /* Min vote of BW is required before turning on AXI clk */
- for (i = 0; i < dpu_kms->num_paths; i++)
- icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
-
rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d21971baa24c..b7f5b8d3bbd6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1390,8 +1390,13 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
dp_catalog_ctrl_reset(ctrl->catalog);
- if (enable)
- dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ /*
+	 * All DP controller programmable registers will not be reset to
+	 * their default values after DP_SW_RESET, therefore the interrupt
+	 * mask bits have to be updated to enable/disable interrupts.
+ */
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 0454a571adf7..e13c5c12b775 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
@@ -25,6 +26,8 @@
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154
+#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+
struct msm_mdss {
struct device *dev;
@@ -36,8 +39,47 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
+ struct icc_path *path[2];
+ u32 num_paths;
};
+static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ struct msm_mdss *msm_mdss)
+{
+ struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ msm_mdss->path[0] = path0;
+ msm_mdss->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ msm_mdss->path[1] = path1;
+ msm_mdss->num_paths++;
+ }
+
+ return 0;
+}
+
+static void msm_mdss_put_icc_path(void *data)
+{
+ struct msm_mdss *msm_mdss = data;
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_put(msm_mdss->path[i]);
+}
+
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+{
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+}
+
static void msm_mdss_irq(struct irq_desc *desc)
{
struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
@@ -136,6 +178,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
+ /*
+ * Several components have AXI clocks that can only be turned on if
+ * the interconnect is enabled (non-zero bandwidth). Let's make sure
+ * that the interconnects are at least at a minimum amount.
+ */
+ msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
@@ -178,6 +227,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+ msm_mdss_icc_request_bw(msm_mdss, 0);
return 0;
}
@@ -271,6 +321,13 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
else
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a16892c16f60..58db79921cd3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index a3ad7c9736ec..b3fffe7b5062 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -74,7 +74,7 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__)
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 6815b4db17c1..1861a8180d3f 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,8 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+config TEGRA_HOST1X_CONTEXT_BUS
+ bool
+
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
select DMA_SHARED_BUFFER
+ select TEGRA_HOST1X_CONTEXT_BUS
select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index d2b6f7de0498..c891a3e33844 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -18,3 +18,4 @@ host1x-y = \
hw/host1x07.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
+obj-$(CONFIG_TEGRA_HOST1X_CONTEXT_BUS) += context_bus.o
diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c
new file mode 100644
index 000000000000..b0d35b2bbe89
--- /dev/null
+++ b/drivers/gpu/host1x/context_bus.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct bus_type host1x_context_device_bus_type = {
+ .name = "host1x-context",
+};
+EXPORT_SYMBOL_GPL(host1x_context_device_bus_type);
+
+static int __init host1x_context_device_bus_init(void)
+{
+ int err;
+
+ if (!of_machine_is_compatible("nvidia,tegra186") &&
+ !of_machine_is_compatible("nvidia,tegra194") &&
+ !of_machine_is_compatible("nvidia,tegra234"))
+ return 0;
+
+ err = bus_register(&host1x_context_device_bus_type);
+ if (err < 0) {
+ pr_err("bus type registration failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+postcore_initcall(host1x_context_device_bus_init);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 54752c85604b..4490e2f7252a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -387,7 +387,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
- usbhid->urbctrl->pipe, 0);
+ usbhid->urbctrl->pipe);
len += (len == 0); /* Don't allow 0-length reports */
len = round_up(len, maxpacket);
if (len > usbhid->bufsize)
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index df02002066ce..b4b007c4beb6 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -279,7 +279,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
kbd = kzalloc(sizeof(struct usb_kbd), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index c89332017d5d..fb1d7d1f6999 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -123,7 +123,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
mouse = kzalloc(sizeof(struct usb_mouse), GFP_KERNEL);
input_dev = input_allocate_device();
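The three usbhid hunks above all drop the last argument of usb_maxpacket(). A hedged note on the form they switch to: the direction the old callers computed with usb_pipeout(pipe) is already encoded in the pipe value, so the two-argument call can derive it internally. The fragment below sketches that pattern in kernel context; the helper name example_int_in_maxpacket() is hypothetical and the snippet only compiles as part of a USB driver.

#include <linux/usb.h>

/* Hypothetical helper showing the two-argument usb_maxpacket() call. */
static unsigned int example_int_in_maxpacket(struct usb_device *dev,
					     const struct usb_endpoint_descriptor *ep)
{
	unsigned int pipe = usb_rcvintpipe(dev, ep->bEndpointAddress);

	/* previously: usb_maxpacket(dev, pipe, usb_pipeout(pipe)) */
	return usb_maxpacket(dev, pipe);
}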
diff --git a/drivers/hte/Kconfig b/drivers/hte/Kconfig
new file mode 100644
index 000000000000..cf29e0218bae
--- /dev/null
+++ b/drivers/hte/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig HTE
+ bool "Hardware Timestamping Engine (HTE) Support"
+ help
+ Hardware Timestamping Engine (HTE) Support.
+
+ Some devices provide a hardware timestamping engine which can
+	  timestamp certain device lines/signals in realtime. This benefits
+	  applications that need accurate event timestamps with low jitter.
+	  This framework provides a generic interface to such HTE
+ providers and consumer devices.
+
+ If unsure, say no.
+
+if HTE
+
+config HTE_TEGRA194
+ tristate "NVIDIA Tegra194 HTE Support"
+ depends on ARCH_TEGRA_194_SOC
+ help
+ Enable this option for integrated hardware timestamping engine also
+ known as generic timestamping engine (GTE) support on NVIDIA Tegra194
+ systems-on-chip. The driver supports 352 LIC IRQs and 39 AON GPIOs
+	  systems-on-chip. The driver supports 352 LIC IRQs and 39 AON GPIO
+	  lines for timestamping in realtime.
+config HTE_TEGRA194_TEST
+ tristate "NVIDIA Tegra194 HTE Test"
+ depends on HTE_TEGRA194
+ help
+	  The NVIDIA Tegra194 GTE test driver demonstrates how to use the
+	  HTE framework to timestamp GPIO and LIC IRQ lines.
+
+endif
diff --git a/drivers/hte/Makefile b/drivers/hte/Makefile
new file mode 100644
index 000000000000..8cca124849d2
--- /dev/null
+++ b/drivers/hte/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HTE) += hte.o
+obj-$(CONFIG_HTE_TEGRA194) += hte-tegra194.o
+obj-$(CONFIG_HTE_TEGRA194_TEST) += hte-tegra194-test.o
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
new file mode 100644
index 000000000000..5d776a185bd6
--- /dev/null
+++ b/drivers/hte/hte-tegra194-test.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/hte.h>
+
+/*
+ * This sample HTE GPIO test driver demonstrates HTE API usage by enabling
+ * hardware timestamp on gpio_in and specified LIC IRQ lines.
+ *
+ * Note: gpio_out and gpio_in need to be shorted externally in order for this
+ * test driver to work for GPIO monitoring. The test driver has been
+ * tested on the Jetson AGX Xavier platform by shorting pins 32 and 16 on
+ * the 40-pin header.
+ *
+ * Device tree snippet to activate this driver:
+ * tegra_hte_test {
+ * compatible = "nvidia,tegra194-hte-test";
+ * in-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 1)>;
+ * out-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 0)>;
+ * timestamps = <&tegra_hte_aon TEGRA194_AON_GPIO(BB, 1)>,
+ * <&tegra_hte_lic 0x19>;
+ * timestamp-names = "hte-gpio", "hte-i2c-irq";
+ * status = "okay";
+ * };
+ *
+ * How to run test driver:
+ * - Load test driver.
+ * - For the GPIO, at regular interval gpio_out pin toggles triggering
+ * HTE for rising edge on gpio_in pin.
+ *
+ * - For the LIC IRQ line, it uses interrupt 0x19, which is i2c controller 1.
+ * - Run i2cdetect -y 1 1>/dev/null; this command generates i2c bus
+ *   transactions which create timestamp data.
+ * - It prints below message for both the lines.
+ * HW timestamp(<line id>:<ts seq number>): <timestamp>, edge: <edge>.
+ * - Unloading the driver disables and deallocates the HTE.
+ */
+
+static struct tegra_hte_test {
+ int gpio_in_irq;
+ struct device *pdev;
+ struct gpio_desc *gpio_in;
+ struct gpio_desc *gpio_out;
+ struct hte_ts_desc *desc;
+ struct timer_list timer;
+ struct kobject *kobj;
+} hte;
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+ char *edge;
+ struct hte_ts_desc *desc = p;
+
+ if (!ts || !p)
+ return HTE_CB_HANDLED;
+
+ if (ts->raw_level < 0)
+ edge = "Unknown";
+
+ pr_info("HW timestamp(%u: %llu): %llu, edge: %s\n",
+ desc->attr.line_id, ts->seq, ts->tsc,
+ (ts->raw_level >= 0) ? ((ts->raw_level == 0) ?
+ "falling" : "rising") : edge);
+
+ return HTE_CB_HANDLED;
+}
+
+static void gpio_timer_cb(struct timer_list *t)
+{
+ (void)t;
+
+ gpiod_set_value(hte.gpio_out, !gpiod_get_value(hte.gpio_out));
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(8000));
+}
+
+static irqreturn_t tegra_hte_test_gpio_isr(int irq, void *data)
+{
+ (void)irq;
+ (void)data;
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id tegra_hte_test_of_match[] = {
+ { .compatible = "nvidia,tegra194-hte-test"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_test_of_match);
+
+static int tegra_hte_test_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i, cnt;
+
+ dev_set_drvdata(&pdev->dev, &hte);
+ hte.pdev = &pdev->dev;
+
+ hte.gpio_out = gpiod_get(&pdev->dev, "out", 0);
+ if (IS_ERR(hte.gpio_out)) {
+ dev_err(&pdev->dev, "failed to get gpio out\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hte.gpio_in = gpiod_get(&pdev->dev, "in", 0);
+ if (IS_ERR(hte.gpio_in)) {
+ dev_err(&pdev->dev, "failed to get gpio in\n");
+ ret = -EINVAL;
+ goto free_gpio_out;
+ }
+
+ ret = gpiod_direction_output(hte.gpio_out, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set output\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_direction_input(hte.gpio_in);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set input\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_to_irq(hte.gpio_in);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+ ret = -ENXIO;
+ goto free_gpio_in;
+ }
+
+ hte.gpio_in_irq = ret;
+ ret = request_irq(ret, tegra_hte_test_gpio_isr,
+ IRQF_TRIGGER_RISING,
+ "tegra_hte_gpio_test_isr", &hte);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire IRQ\n");
+ ret = -ENXIO;
+ goto free_irq;
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+ if (cnt < 0)
+ goto free_irq;
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+ hte.desc = devm_kzalloc(hte.pdev, sizeof(*hte.desc) * cnt, GFP_KERNEL);
+ if (!hte.desc) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (i == 0)
+ /*
+ * GPIO hte init, line_id and name will be parsed from
+ * the device tree node. The edge_flag is implicitly
+			 * set by the request_irq call. Only line_data needs
+			 * to be set.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL,
+ hte.gpio_in);
+ else
+ /*
+ * same comment as above except that IRQ does not need
+ * line data.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL, NULL);
+
+ ret = hte_ts_get(hte.pdev, &hte.desc[i], i);
+ if (ret)
+ goto ts_put;
+
+ ret = devm_hte_request_ts_ns(hte.pdev, &hte.desc[i],
+ process_hw_ts, NULL,
+ &hte.desc[i]);
+ if (ret) /* no need to ts_put, request API takes care */
+ goto free_irq;
+ }
+
+ timer_setup(&hte.timer, gpio_timer_cb, 0);
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(5000));
+
+ return 0;
+
+ts_put:
+ cnt = i;
+ for (i = 0; i < cnt; i++)
+ hte_ts_put(&hte.desc[i]);
+free_irq:
+ free_irq(hte.gpio_in_irq, &hte);
+free_gpio_in:
+ gpiod_put(hte.gpio_in);
+free_gpio_out:
+ gpiod_put(hte.gpio_out);
+out:
+
+ return ret;
+}
+
+static int tegra_hte_test_remove(struct platform_device *pdev)
+{
+ (void)pdev;
+
+ free_irq(hte.gpio_in_irq, &hte);
+ gpiod_put(hte.gpio_in);
+ gpiod_put(hte.gpio_out);
+ del_timer_sync(&hte.timer);
+
+ return 0;
+}
+
+static struct platform_driver tegra_hte_test_driver = {
+ .probe = tegra_hte_test_probe,
+ .remove = tegra_hte_test_remove,
+ .driver = {
+ .name = "tegra_hte_test",
+ .of_match_table = tegra_hte_test_of_match,
+ },
+};
+module_platform_driver(tegra_hte_test_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
new file mode 100644
index 000000000000..49a27af22742
--- /dev/null
+++ b/drivers/hte/hte-tegra194.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/hte.h>
+#include <linux/uaccess.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#define HTE_SUSPEND 0
+
+/* HTE source clock TSC is 31.25MHz */
+#define HTE_TS_CLK_RATE_HZ 31250000ULL
+#define HTE_CLK_RATE_NS 32
+#define HTE_TS_NS_SHIFT __builtin_ctz(HTE_CLK_RATE_NS)
+
+#define NV_AON_SLICE_INVALID -1
+#define NV_LINES_IN_SLICE 32
+
+/* AON HTE line map For slice 1 */
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_28 12
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_29 13
+
+/* AON HTE line map For slice 2 */
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_0 0
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_1 1
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_2 2
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_3 3
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_4 4
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_5 5
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_6 6
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_7 7
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_8 8
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_9 9
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_10 10
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_11 11
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_12 12
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_13 13
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_14 14
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_15 15
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_16 16
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_17 17
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_18 18
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_19 19
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_20 20
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_21 21
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_22 22
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_23 23
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_24 24
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_25 25
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_26 26
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_27 27
+
+#define HTE_TECTRL 0x0
+#define HTE_TETSCH 0x4
+#define HTE_TETSCL 0x8
+#define HTE_TESRC 0xC
+#define HTE_TECCV 0x10
+#define HTE_TEPCV 0x14
+#define HTE_TECMD 0x1C
+#define HTE_TESTATUS 0x20
+#define HTE_SLICE0_TETEN 0x40
+#define HTE_SLICE1_TETEN 0x60
+
+#define HTE_SLICE_SIZE (HTE_SLICE1_TETEN - HTE_SLICE0_TETEN)
+
+#define HTE_TECTRL_ENABLE_ENABLE 0x1
+
+#define HTE_TECTRL_OCCU_SHIFT 0x8
+#define HTE_TECTRL_INTR_SHIFT 0x1
+#define HTE_TECTRL_INTR_ENABLE 0x1
+
+#define HTE_TESRC_SLICE_SHIFT 16
+#define HTE_TESRC_SLICE_DEFAULT_MASK 0xFF
+
+#define HTE_TECMD_CMD_POP 0x1
+
+#define HTE_TESTATUS_OCCUPANCY_SHIFT 8
+#define HTE_TESTATUS_OCCUPANCY_MASK 0xFF
+
+enum tegra_hte_type {
+ HTE_TEGRA_TYPE_GPIO = 1U << 0,
+ HTE_TEGRA_TYPE_LIC = 1U << 1,
+};
+
+struct hte_slices {
+ u32 r_val;
+ unsigned long flags;
+	/* prevent lines mapped to the same slice from updating its register */
+ spinlock_t s_lock;
+};
+
+struct tegra_hte_line_mapped {
+ int slice;
+ u32 bit_index;
+};
+
+struct tegra_hte_line_data {
+ unsigned long flags;
+ void *data;
+};
+
+struct tegra_hte_data {
+ enum tegra_hte_type type;
+ u32 map_sz;
+ u32 sec_map_sz;
+ const struct tegra_hte_line_mapped *map;
+ const struct tegra_hte_line_mapped *sec_map;
+};
+
+struct tegra_hte_soc {
+ int hte_irq;
+ u32 itr_thrshld;
+ u32 conf_rval;
+ struct hte_slices *sl;
+ const struct tegra_hte_data *prov_data;
+ struct tegra_hte_line_data *line_data;
+ struct hte_chip *chip;
+ struct gpio_chip *c;
+ void __iomem *regs;
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ /* CC port */
+ [12] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [13] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [14] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [15] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ /* EE port */
+ [23] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [24] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [27] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [28] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [29] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_sec_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ [12] = {NV_AON_SLICE_INVALID, 0},
+ [13] = {NV_AON_SLICE_INVALID, 0},
+ [14] = {NV_AON_SLICE_INVALID, 0},
+ [15] = {NV_AON_SLICE_INVALID, 0},
+ /* CC port */
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ [27] = {NV_AON_SLICE_INVALID, 0},
+ [28] = {NV_AON_SLICE_INVALID, 0},
+ [29] = {NV_AON_SLICE_INVALID, 0},
+ [30] = {NV_AON_SLICE_INVALID, 0},
+ [31] = {NV_AON_SLICE_INVALID, 0},
+ /* EE port */
+ [32] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [33] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [34] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [35] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [36] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [37] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [38] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+ [39] = {NV_AON_SLICE_INVALID, 0},
+};
+
+static const struct tegra_hte_data aon_hte = {
+ .map_sz = ARRAY_SIZE(tegra194_aon_gpio_map),
+ .map = tegra194_aon_gpio_map,
+ .sec_map_sz = ARRAY_SIZE(tegra194_aon_gpio_sec_map),
+ .sec_map = tegra194_aon_gpio_sec_map,
+ .type = HTE_TEGRA_TYPE_GPIO,
+};
+
+static const struct tegra_hte_data lic_hte = {
+ .map_sz = 0,
+ .map = NULL,
+ .type = HTE_TEGRA_TYPE_LIC,
+};
+
+static inline u32 tegra_hte_readl(struct tegra_hte_soc *hte, u32 reg)
+{
+ return readl(hte->regs + reg);
+}
+
+static inline void tegra_hte_writel(struct tegra_hte_soc *hte, u32 reg,
+ u32 val)
+{
+ writel(val, hte->regs + reg);
+}
+
+static int tegra_hte_map_to_line_id(u32 eid,
+ const struct tegra_hte_line_mapped *m,
+ u32 map_sz, u32 *mapped)
+{
+
+ if (m) {
+ if (eid > map_sz)
+ return -EINVAL;
+ if (m[eid].slice == NV_AON_SLICE_INVALID)
+ return -EINVAL;
+
+ *mapped = (m[eid].slice << 5) + m[eid].bit_index;
+ } else {
+ *mapped = eid;
+ }
+
+ return 0;
+}
+
+static int tegra_hte_line_xlate(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ int ret = 0;
+ u32 line_id;
+ struct tegra_hte_soc *gs;
+ const struct tegra_hte_line_mapped *map = NULL;
+ u32 map_sz = 0;
+
+ if (!gc || !desc || !xlated_id)
+ return -EINVAL;
+
+ if (args) {
+ if (gc->of_hte_n_cells < 1)
+ return -EINVAL;
+
+ if (args->args_count != gc->of_hte_n_cells)
+ return -EINVAL;
+
+ desc->attr.line_id = args->args[0];
+ }
+
+ gs = gc->data;
+ if (!gs || !gs->prov_data)
+ return -EINVAL;
+
+ /*
+	 * There are two paths GPIO consumers can take:
+	 * 1) The consumer (for example gpiolib-cdev) uses the global GPIO
+	 * number, which is assigned at run time.
+	 * 2) The consumer passes the GPIO from the DT, where it is assigned
+	 * statically, for example via the TEGRA194_AON_GPIO DT binding.
+	 *
+	 * The code below addresses both use cases and maps them into the
+	 * HTE/GTE namespace.
+ */
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && !args) {
+ line_id = desc->attr.line_id - gs->c->base;
+ map = gs->prov_data->map;
+ map_sz = gs->prov_data->map_sz;
+ } else if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && args) {
+ line_id = desc->attr.line_id;
+ map = gs->prov_data->sec_map;
+ map_sz = gs->prov_data->sec_map_sz;
+ } else {
+ line_id = desc->attr.line_id;
+ }
+
+ ret = tegra_hte_map_to_line_id(line_id, map, map_sz, xlated_id);
+ if (ret < 0) {
+ dev_err(gc->dev, "line_id:%u mapping failed\n",
+ desc->attr.line_id);
+ return ret;
+ }
+
+ if (*xlated_id > gc->nlines)
+ return -EINVAL;
+
+ dev_dbg(gc->dev, "requested id:%u, xlated id:%u\n",
+ desc->attr.line_id, *xlated_id);
+
+ return 0;
+}
+
+static int tegra_hte_line_xlate_plat(struct hte_chip *gc,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ return tegra_hte_line_xlate(gc, NULL, desc, xlated_id);
+}
+
+static int tegra_hte_en_dis_common(struct hte_chip *chip, u32 line_id, bool en)
+{
+ u32 slice, sl_bit_shift, line_bit, val, reg;
+ struct tegra_hte_soc *gs;
+
+ sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ if (!chip)
+ return -EINVAL;
+
+ gs = chip->data;
+
+ if (line_id > chip->nlines) {
+ dev_err(chip->dev,
+ "line id: %u is not supported by this controller\n",
+ line_id);
+ return -EINVAL;
+ }
+
+ slice = line_id >> sl_bit_shift;
+ line_bit = line_id & (HTE_SLICE_SIZE - 1);
+ reg = (slice << sl_bit_shift) + HTE_SLICE0_TETEN;
+
+ spin_lock(&gs->sl[slice].s_lock);
+
+ if (test_bit(HTE_SUSPEND, &gs->sl[slice].flags)) {
+ spin_unlock(&gs->sl[slice].s_lock);
+ dev_dbg(chip->dev, "device suspended");
+ return -EBUSY;
+ }
+
+ val = tegra_hte_readl(gs, reg);
+ if (en)
+ val = val | (1 << line_bit);
+ else
+ val = val & (~(1 << line_bit));
+ tegra_hte_writel(gs, reg, val);
+
+ spin_unlock(&gs->sl[slice].s_lock);
+
+ dev_dbg(chip->dev, "line: %u, slice %u, line_bit %u, reg:0x%x\n",
+ line_id, slice, line_bit, reg);
+
+ return 0;
+}
+
+static int tegra_hte_enable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_disable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_request(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ int ret;
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ if (!attr->line_data)
+ return -EINVAL;
+
+ ret = gpiod_enable_hw_timestamp_ns(attr->line_data,
+ attr->edge_flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = attr->line_data;
+ gs->line_data[line_id].flags = attr->edge_flags;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_release(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+ int ret;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ ret = gpiod_disable_hw_timestamp_ns(attr->line_data,
+ gs->line_data[line_id].flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = NULL;
+ gs->line_data[line_id].flags = 0;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_clk_src_info(struct hte_chip *chip,
+ struct hte_clk_info *ci)
+{
+ (void)chip;
+
+ if (!ci)
+ return -EINVAL;
+
+ ci->hz = HTE_TS_CLK_RATE_HZ;
+ ci->type = CLOCK_MONOTONIC;
+
+ return 0;
+}
+
+static int tegra_hte_get_level(struct tegra_hte_soc *gs, u32 line_id)
+{
+ struct gpio_desc *desc;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ desc = gs->line_data[line_id].data;
+ if (desc)
+ return gpiod_get_raw_value(desc);
+ }
+
+ return -1;
+}
+
+static void tegra_hte_read_fifo(struct tegra_hte_soc *gs)
+{
+ u32 tsh, tsl, src, pv, cv, acv, slice, bit_index, line_id;
+ u64 tsc;
+ struct hte_ts_data el;
+
+ while ((tegra_hte_readl(gs, HTE_TESTATUS) >>
+ HTE_TESTATUS_OCCUPANCY_SHIFT) &
+ HTE_TESTATUS_OCCUPANCY_MASK) {
+ tsh = tegra_hte_readl(gs, HTE_TETSCH);
+ tsl = tegra_hte_readl(gs, HTE_TETSCL);
+ tsc = (((u64)tsh << 32) | tsl);
+
+ src = tegra_hte_readl(gs, HTE_TESRC);
+ slice = (src >> HTE_TESRC_SLICE_SHIFT) &
+ HTE_TESRC_SLICE_DEFAULT_MASK;
+
+ pv = tegra_hte_readl(gs, HTE_TEPCV);
+ cv = tegra_hte_readl(gs, HTE_TECCV);
+ acv = pv ^ cv;
+ while (acv) {
+ bit_index = __builtin_ctz(acv);
+ line_id = bit_index + (slice << 5);
+ el.tsc = tsc << HTE_TS_NS_SHIFT;
+ el.raw_level = tegra_hte_get_level(gs, line_id);
+ hte_push_ts_ns(gs->chip, line_id, &el);
+ acv &= ~BIT(bit_index);
+ }
+ tegra_hte_writel(gs, HTE_TECMD, HTE_TECMD_CMD_POP);
+ }
+}
+
+static irqreturn_t tegra_hte_isr(int irq, void *dev_id)
+{
+ struct tegra_hte_soc *gs = dev_id;
+ (void)irq;
+
+ tegra_hte_read_fifo(gs);
+
+ return IRQ_HANDLED;
+}
+
+static bool tegra_hte_match_from_linedata(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc)
+{
+ struct tegra_hte_soc *hte_dev = chip->data;
+
+ if (!hte_dev || (hte_dev->prov_data->type != HTE_TEGRA_TYPE_GPIO))
+ return false;
+
+ return hte_dev->c == gpiod_to_chip(hdesc->attr.line_data);
+}
+
+static const struct of_device_id tegra_hte_of_match[] = {
+ { .compatible = "nvidia,tegra194-gte-lic", .data = &lic_hte},
+ { .compatible = "nvidia,tegra194-gte-aon", .data = &aon_hte},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_of_match);
+
+static const struct hte_ops g_ops = {
+ .request = tegra_hte_request,
+ .release = tegra_hte_release,
+ .enable = tegra_hte_enable,
+ .disable = tegra_hte_disable,
+ .get_clk_src_info = tegra_hte_clk_src_info,
+};
+
+static void tegra_gte_disable(void *data)
+{
+ struct platform_device *pdev = data;
+ struct tegra_hte_soc *gs = dev_get_drvdata(&pdev->dev);
+
+ tegra_hte_writel(gs, HTE_TECTRL, 0);
+}
+
+static int tegra_get_gpiochip_from_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
+static int tegra_hte_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 i, slices, val = 0;
+ u32 nlines;
+ struct device *dev;
+ struct tegra_hte_soc *hte_dev;
+ struct hte_chip *gc;
+
+ dev = &pdev->dev;
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,slices", &slices);
+ if (ret != 0) {
+ dev_err(dev, "Could not read slices\n");
+ return -EINVAL;
+ }
+ nlines = slices << 5;
+
+ hte_dev = devm_kzalloc(dev, sizeof(*hte_dev), GFP_KERNEL);
+ if (!hte_dev)
+ return -ENOMEM;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, hte_dev);
+ hte_dev->prov_data = of_device_get_match_data(&pdev->dev);
+
+ hte_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hte_dev->regs))
+ return PTR_ERR(hte_dev->regs);
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,int-threshold",
+ &hte_dev->itr_thrshld);
+ if (ret != 0)
+ hte_dev->itr_thrshld = 1;
+
+ hte_dev->sl = devm_kcalloc(dev, slices, sizeof(*hte_dev->sl),
+ GFP_KERNEL);
+ if (!hte_dev->sl)
+ return -ENOMEM;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to get irq\n");
+ return ret;
+ }
+ hte_dev->hte_irq = ret;
+ ret = devm_request_irq(dev, hte_dev->hte_irq, tegra_hte_isr, 0,
+ dev_name(dev), hte_dev);
+ if (ret < 0) {
+ dev_err(dev, "request irq failed.\n");
+ return ret;
+ }
+
+ gc->nlines = nlines;
+ gc->ops = &g_ops;
+ gc->dev = dev;
+ gc->data = hte_dev;
+ gc->xlate_of = tegra_hte_line_xlate;
+ gc->xlate_plat = tegra_hte_line_xlate_plat;
+ gc->of_hte_n_cells = 1;
+
+ if (hte_dev->prov_data &&
+ hte_dev->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ hte_dev->line_data = devm_kcalloc(dev, nlines,
+ sizeof(*hte_dev->line_data),
+ GFP_KERNEL);
+ if (!hte_dev->line_data)
+ return -ENOMEM;
+
+ gc->match_from_linedata = tegra_hte_match_from_linedata;
+
+ hte_dev->c = gpiochip_find("tegra194-gpio-aon",
+ tegra_get_gpiochip_from_name);
+ if (!hte_dev->c)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for gpio controller\n");
+ }
+
+ hte_dev->chip = gc;
+
+ ret = devm_hte_register_chip(hte_dev->chip);
+ if (ret) {
+ dev_err(gc->dev, "hte chip register failed");
+ return ret;
+ }
+
+ for (i = 0; i < slices; i++) {
+ hte_dev->sl[i].flags = 0;
+ spin_lock_init(&hte_dev->sl[i].s_lock);
+ }
+
+ val = HTE_TECTRL_ENABLE_ENABLE |
+ (HTE_TECTRL_INTR_ENABLE << HTE_TECTRL_INTR_SHIFT) |
+ (hte_dev->itr_thrshld << HTE_TECTRL_OCCU_SHIFT);
+ tegra_hte_writel(hte_dev, HTE_TECTRL, val);
+
+ ret = devm_add_action_or_reset(&pdev->dev, tegra_gte_disable, pdev);
+ if (ret)
+ return ret;
+
+ dev_dbg(gc->dev, "lines: %d, slices:%d", gc->nlines, slices);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_resume_early(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ tegra_hte_writel(gs, HTE_TECTRL, gs->conf_rval);
+
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ tegra_hte_writel(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN),
+ gs->sl[i].r_val);
+ clear_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_suspend_late(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ gs->conf_rval = tegra_hte_readl(gs, HTE_TECTRL);
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ gs->sl[i].r_val = tegra_hte_readl(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN));
+ set_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_hte_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(tegra_hte_suspend_late,
+ tegra_hte_resume_early)
+};
+
+static struct platform_driver tegra_hte_driver = {
+ .probe = tegra_hte_probe,
+ .driver = {
+ .name = "tegra_hte",
+ .pm = &tegra_hte_pm,
+ .of_match_table = tegra_hte_of_match,
+ },
+};
+
+module_platform_driver(tegra_hte_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra HTE (Hardware Timestamping Engine) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte.c b/drivers/hte/hte.c
new file mode 100644
index 000000000000..7c3b4476f890
--- /dev/null
+++ b/drivers/hte/hte.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/hte.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+
+#define HTE_TS_NAME_LEN 10
+
+/* Global list of the HTE devices */
+static DEFINE_SPINLOCK(hte_lock);
+static LIST_HEAD(hte_devices);
+
+enum {
+ HTE_TS_REGISTERED,
+ HTE_TS_REQ,
+ HTE_TS_DISABLE,
+ HTE_TS_QUEUE_WK,
+};
+
+/**
+ * struct hte_ts_info - Information related to requested timestamp.
+ *
+ * @xlated_id: Timestamp ID as understood between the HTE subsystem and the
+ * HTE provider; see the xlate callback API.
+ * @flags: Flags holding state information.
+ * @hte_cb_flags: Callback related flags.
+ * @seq: Timestamp sequence counter.
+ * @line_name: HTE allocated line name.
+ * @free_attr_name: If set, free the attr name.
+ * @cb: A non-sleeping callback function provided by clients.
+ * @tcb: A secondary sleeping callback function provided by clients.
+ * @dropped_ts: Dropped timestamps.
+ * @slock: Spin lock to synchronize between disable/enable,
+ * request/release APIs.
+ * @cb_work: Work used to invoke the secondary callback (tcb) when specified.
+ * @req_mlock: Lock during timestamp request/release APIs.
+ * @ts_dbg_root: Root for the debug fs.
+ * @gdev: HTE abstract device that this timestamp information belongs to.
+ * @cl_data: Client specific data.
+ */
+struct hte_ts_info {
+ u32 xlated_id;
+ unsigned long flags;
+ unsigned long hte_cb_flags;
+ u64 seq;
+ char *line_name;
+ bool free_attr_name;
+ hte_ts_cb_t cb;
+ hte_ts_sec_cb_t tcb;
+ atomic_t dropped_ts;
+ spinlock_t slock;
+ struct work_struct cb_work;
+ struct mutex req_mlock;
+ struct dentry *ts_dbg_root;
+ struct hte_device *gdev;
+ void *cl_data;
+};
+
+/**
+ * struct hte_device - HTE abstract device
+ * @nlines: Number of entities this device supports.
+ * @ts_req: Total number of entities requested.
+ * @sdev: Device used in various debug prints.
+ * @dbg_root: Root directory for debug fs.
+ * @list: List node to store hte_device for each provider.
+ * @chip: HTE chip providing this HTE device.
+ * @owner: Helps prevent removal of the module while it is in use.
+ * @ei: Timestamp information.
+ */
+struct hte_device {
+ u32 nlines;
+ atomic_t ts_req;
+ struct device *sdev;
+ struct dentry *dbg_root;
+ struct list_head list;
+ struct hte_chip *chip;
+ struct module *owner;
+ struct hte_ts_info ei[];
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *hte_root;
+
+static int __init hte_subsys_dbgfs_init(void)
+{
+ /* creates /sys/kernel/debug/hte/ */
+ hte_root = debugfs_create_dir("hte", NULL);
+
+ return 0;
+}
+subsys_initcall(hte_subsys_dbgfs_init);
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+ const struct hte_chip *chip = gdev->chip;
+ const char *name = chip->name ? chip->name : dev_name(chip->dev);
+
+ gdev->dbg_root = debugfs_create_dir(name, hte_root);
+
+ debugfs_create_atomic_t("ts_requested", 0444, gdev->dbg_root,
+ &gdev->ts_req);
+ debugfs_create_u32("total_ts", 0444, gdev->dbg_root,
+ &gdev->nlines);
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+ if (!ei->gdev->dbg_root || !name)
+ return;
+
+ ei->ts_dbg_root = debugfs_create_dir(name, ei->gdev->dbg_root);
+
+ debugfs_create_atomic_t("dropped_timestamps", 0444, ei->ts_dbg_root,
+ &ei->dropped_ts);
+}
+
+#else
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+}
+
+#endif
+
+/**
+ * hte_ts_put() - Release and disable timestamp for the given desc.
+ *
+ * @desc: timestamp descriptor.
+ *
+ * Context: debugfs_remove_recursive() may use sleeping locks; not suitable
+ * from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_ts_put(struct hte_ts_desc *desc)
+{
+ int ret = 0;
+ unsigned long flag;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is registered but not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ clear_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = NULL;
+ ret = 0;
+ goto mod_put;
+ }
+
+ ret = gdev->chip->ops->release(gdev->chip, desc, ei->xlated_id);
+ if (ret) {
+ dev_err(gdev->sdev, "id: %d free failed\n",
+ desc->attr.line_id);
+ goto unlock;
+ }
+
+ kfree(ei->line_name);
+ if (ei->free_attr_name)
+ kfree_const(desc->attr.name);
+
+ debugfs_remove_recursive(ei->ts_dbg_root);
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (test_bit(HTE_TS_QUEUE_WK, &ei->flags)) {
+ spin_unlock_irqrestore(&ei->slock, flag);
+ flush_work(&ei->cb_work);
+ spin_lock_irqsave(&ei->slock, flag);
+ }
+
+ atomic_dec(&gdev->ts_req);
+ atomic_set(&ei->dropped_ts, 0);
+
+ ei->seq = 1;
+ ei->flags = 0;
+ desc->hte_data = NULL;
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ ei->cb = NULL;
+ ei->tcb = NULL;
+ ei->cl_data = NULL;
+
+mod_put:
+ module_put(gdev->owner);
+unlock:
+ mutex_unlock(&ei->req_mlock);
+ dev_dbg(gdev->sdev, "release id: %d\n", desc->attr.line_id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_put);
+
+static int hte_ts_dis_en_common(struct hte_ts_desc *desc, bool en)
+{
+ u32 ts_id;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ int ret;
+ unsigned long flag;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+ ts_id = desc->attr.line_id;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ dev_dbg(gdev->sdev, "id:%d is not registered", ts_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (en) {
+ if (!test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->enable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d enable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_DISABLE, &ei->flags);
+ } else {
+ if (test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->disable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d disable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ set_bit(HTE_TS_DISABLE, &ei->flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+out:
+ mutex_unlock(&ei->req_mlock);
+ return ret;
+}
+
+/**
+ * hte_disable_ts() - Disable timestamp on given descriptor.
+ *
+ * The API does not release any resources associated with desc.
+ *
+ * @desc: ts descriptor; this is the same one returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, false);
+}
+EXPORT_SYMBOL_GPL(hte_disable_ts);
+
+/**
+ * hte_enable_ts() - Enable timestamp on given descriptor.
+ *
+ * @desc: ts descriptor; this is the same one returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, true);
+}
+EXPORT_SYMBOL_GPL(hte_enable_ts);
+
+static void hte_do_cb_work(struct work_struct *w)
+{
+ unsigned long flag;
+ struct hte_ts_info *ei = container_of(w, struct hte_ts_info, cb_work);
+
+ if (unlikely(!ei->tcb))
+ return;
+
+ ei->tcb(ei->cl_data);
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ spin_unlock_irqrestore(&ei->slock, flag);
+}
+
+static int __hte_req_ts(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei = desc->hte_data;
+
+ gdev = ei->gdev;
+ /*
+ * Multiple consumers may request the same entity concurrently, so
+ * take the lock here.
+ */
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ !test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(gdev->chip->dev, "id:%u req failed\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto unlock;
+ }
+
+ ei->cb = cb;
+ ei->tcb = tcb;
+ if (tcb)
+ INIT_WORK(&ei->cb_work, hte_do_cb_work);
+
+ ret = gdev->chip->ops->request(gdev->chip, desc, ei->xlated_id);
+ if (ret < 0) {
+ dev_err(gdev->chip->dev, "ts request failed\n");
+ goto unlock;
+ }
+
+ ei->cl_data = data;
+ ei->seq = 1;
+
+ atomic_inc(&gdev->ts_req);
+
+ ei->line_name = NULL;
+ if (!desc->attr.name) {
+ ei->line_name = kzalloc(HTE_TS_NAME_LEN, GFP_KERNEL);
+ if (ei->line_name)
+ scnprintf(ei->line_name, HTE_TS_NAME_LEN, "ts_%u",
+ desc->attr.line_id);
+ }
+
+ hte_ts_dbgfs_init(desc->attr.name == NULL ?
+ ei->line_name : desc->attr.name, ei);
+ set_bit(HTE_TS_REGISTERED, &ei->flags);
+
+ dev_dbg(gdev->chip->dev, "id: %u, xlated id:%u",
+ desc->attr.line_id, ei->xlated_id);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static int hte_bind_ts_info_locked(struct hte_ts_info *ei,
+ struct hte_ts_desc *desc, u32 x_id)
+{
+ int ret = 0;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(ei->gdev->chip->dev, "id:%u is already requested\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ set_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = ei;
+ ei->xlated_id = x_id;
+
+out:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static struct hte_device *of_node_to_htedevice(struct device_node *np)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->dev &&
+ gdev->chip->dev->of_node == np) {
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct hte_device *hte_find_dev_from_linedata(struct hte_ts_desc *desc)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->match_from_linedata) {
+ if (!gdev->chip->match_from_linedata(gdev->chip, desc))
+ continue;
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * of_hte_req_count() - Return the number of entities to timestamp.
+ *
+ * The function returns the total count of the requested timestamp entities
+ * by parsing the device tree.
+ *
+ * @dev: The HTE consumer.
+ *
+ * Returns: Positive number on success, -ENOENT if no entries,
+ * -EINVAL for other errors.
+ */
+int of_hte_req_count(struct device *dev)
+{
+ int count;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_count_phandle_with_args(dev->of_node, "timestamps",
+ "#timestamp-cells");
+
+ return count ? count : -ENOENT;
+}
+EXPORT_SYMBOL_GPL(of_hte_req_count);
+
+static inline struct hte_device *hte_get_dev(struct hte_ts_desc *desc)
+{
+ return hte_find_dev_from_linedata(desc);
+}
+
+static struct hte_device *hte_of_get_dev(struct device *dev,
+ struct hte_ts_desc *desc,
+ int index,
+ struct of_phandle_args *args,
+ bool *free_name)
+{
+ int ret;
+ struct device_node *np;
+ char *temp;
+
+ if (!dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = dev->of_node;
+
+ if (!of_find_property(np, "timestamp-names", NULL)) {
+ /* Let hte core construct it during request time */
+ desc->attr.name = NULL;
+ } else {
+ ret = of_property_read_string_index(np, "timestamp-names",
+ index, &desc->attr.name);
+ if (ret) {
+ pr_err("can't parse \"timestamp-names\" property\n");
+ return ERR_PTR(ret);
+ }
+ *free_name = false;
+ if (desc->attr.name) {
+ temp = skip_spaces(desc->attr.name);
+ if (!*temp)
+ desc->attr.name = NULL;
+ }
+ }
+
+ ret = of_parse_phandle_with_args(np, "timestamps", "#timestamp-cells",
+ index, args);
+ if (ret) {
+ pr_err("%s(): can't parse \"timestamps\" property\n",
+ __func__);
+ return ERR_PTR(ret);
+ }
+
+ of_node_put(args->np);
+
+ return of_node_to_htedevice(args->np);
+}
+
+/**
+ * hte_ts_get() - The function to initialize and obtain HTE desc.
+ *
+ * The function initializes the consumer-provided HTE descriptor. If the
+ * consumer has a device tree node, the index is used to parse the line id and
+ * other details. The function needs to be called before using any request
+ * APIs.
+ *
+ * @dev: HTE consumer/client device, used in case of parsing device tree node.
+ * @desc: Pre-allocated timestamp descriptor.
+ * @index: Index used to parse line_id from the device tree node, if the node
+ * is present.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index)
+{
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ const struct fwnode_handle *fwnode;
+ struct of_phandle_args args;
+ u32 xlated_id;
+ int ret;
+ bool free_name = false;
+
+ if (!desc)
+ return -EINVAL;
+
+ fwnode = dev ? dev_fwnode(dev) : NULL;
+
+ if (is_of_node(fwnode))
+ gdev = hte_of_get_dev(dev, desc, index, &args, &free_name);
+ else
+ gdev = hte_get_dev(desc);
+
+ if (IS_ERR(gdev)) {
+ pr_err("%s() no hte dev found\n", __func__);
+ return PTR_ERR(gdev);
+ }
+
+ if (!try_module_get(gdev->owner))
+ return -ENODEV;
+
+ if (!gdev->chip) {
+ pr_err("%s(): requested id does not have provider\n",
+ __func__);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ if (is_of_node(fwnode)) {
+ if (!gdev->chip->xlate_of)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_of(gdev->chip, &args,
+ desc, &xlated_id);
+ } else {
+ if (!gdev->chip->xlate_plat)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_plat(gdev->chip, desc,
+ &xlated_id);
+ }
+
+ if (ret < 0)
+ goto put;
+
+ ei = &gdev->ei[xlated_id];
+
+ ret = hte_bind_ts_info_locked(ei, desc, xlated_id);
+ if (ret)
+ goto put;
+
+ ei->free_attr_name = free_name;
+
+ return 0;
+
+put:
+ module_put(gdev->owner);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_get);
+
+static void __devm_hte_release_ts(void *res)
+{
+ hte_ts_put(res);
+}
+
+/**
+ * hte_request_ts_ns() - The API to request and enable hardware timestamp in
+ * nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ *
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !cb)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ ret = __hte_req_ts(desc, cb, tcb, data);
+ if (ret < 0) {
+ dev_err(ei->gdev->chip->dev,
+ "failed to request id: %d\n", desc->attr.line_id);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_request_ts_ns);
+
+/**
+ * devm_hte_request_ts_ns() - Resource managed API to request and enable
+ * hardware timestamp in nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ * It deallocates and disables them automatically when the consumer exits.
+ *
+ * @dev: HTE consumer/client device.
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ err = hte_request_ts_ns(desc, cb, tcb, data);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, __devm_hte_release_ts, desc);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_request_ts_ns);
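A minimal consumer-side sketch (editorial, not part of this patch) of the request flow above, assuming a consumer whose device tree node carries the "timestamps" and "#timestamp-cells" properties. The names my_ts_cb() and my_request_all_ts() are hypothetical, and HTE_CB_HANDLED is assumed to be the "no further work" value of enum hte_return in <linux/hte.h>.

static enum hte_return my_ts_cb(struct hte_ts_data *ts, void *data)
{
	/* Runs in atomic context for every timestamp pushed by the provider */
	pr_debug("hte seq: %llu\n", (unsigned long long)ts->seq);
	return HTE_CB_HANDLED;
}

static int my_request_all_ts(struct device *dev)
{
	struct hte_ts_desc *descs;
	int i, cnt, ret;

	cnt = of_hte_req_count(dev);
	if (cnt < 0)
		return cnt;

	descs = devm_kcalloc(dev, cnt, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		/* Parse line_id etc. from the "timestamps" property */
		ret = hte_ts_get(dev, &descs[i], i);
		if (ret)
			return ret;

		/* Request and enable; released automatically on detach */
		ret = devm_hte_request_ts_ns(dev, &descs[i], my_ts_cb,
					     NULL, NULL);
		if (ret) {
			hte_ts_put(&descs[i]);
			return ret;
		}
	}

	return 0;
}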
+
+/**
+ * hte_init_line_attr() - Initialize line attributes.
+ *
+ * Zeroes out the line attributes and initializes them with the provided
+ * arguments. The function needs to be called before calling any
+ * consumer-facing functions.
+ *
+ * @desc: Pre-allocated timestamp descriptor.
+ * @line_id: line id.
+ * @edge_flags: edge flags related to line_id.
+ * @name: name of the line.
+ * @data: line data related to line_id.
+ *
+ * Context: Any.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name, void *data)
+{
+ if (!desc)
+ return -EINVAL;
+
+ memset(&desc->attr, 0, sizeof(desc->attr));
+
+ desc->attr.edge_flags = edge_flags;
+ desc->attr.line_id = line_id;
+ desc->attr.line_data = data;
+ if (name) {
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ }
+
+ desc->attr.name = name;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_init_line_attr);
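A short sketch (editorial, not part of this patch) of the non-device-tree path, assuming a provider that implements match_from_linedata(): the consumer initializes the line attributes first and then calls hte_ts_get() with a NULL device. The line id 10 and my_line_data are hypothetical.

static int my_plat_ts_setup(struct hte_ts_desc *desc, void *my_line_data)
{
	int ret;

	/* Hypothetical line id 10, no edge flags, auto-generated name */
	ret = hte_init_line_attr(desc, 10, 0, NULL, my_line_data);
	if (ret)
		return ret;

	/* No DT node; the provider is matched from the line data */
	return hte_ts_get(NULL, desc, 0);
}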
+
+/**
+ * hte_get_clk_src_info() - Get the clock source information for a ts
+ * descriptor.
+ *
+ * @desc: ts descriptor, the same as returned from the request API.
+ * @ci: The API fills this structure with the clock information data.
+ *
+ * Context: Any context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ struct hte_chip *chip;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !ci) {
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ ei = desc->hte_data;
+ if (!ei->gdev || !ei->gdev->chip)
+ return -EINVAL;
+
+ chip = ei->gdev->chip;
+ if (!chip->ops->get_clk_src_info)
+ return -EOPNOTSUPP;
+
+ return chip->ops->get_clk_src_info(chip, ci);
+}
+EXPORT_SYMBOL_GPL(hte_get_clk_src_info);
+
+/**
+ * hte_push_ts_ns() - Push timestamp data in nanoseconds.
+ *
+ * It is used by the provider to push timestamp data.
+ *
+ * @chip: The HTE chip, used during the registration.
+ * @xlated_id: Entity id understood by both the subsystem and the provider;
+ * this is obtained from the xlate callback during the request API.
+ * @data: timestamp data.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data)
+{
+ enum hte_return ret;
+ int st = 0;
+ struct hte_ts_info *ei;
+ unsigned long flag;
+
+ if (!chip || !data || !chip->gdev)
+ return -EINVAL;
+
+ if (xlated_id >= chip->nlines)
+ return -EINVAL;
+
+ ei = &chip->gdev->ei[xlated_id];
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ /* timestamp sequence counter */
+ data->seq = ei->seq++;
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ dev_dbg(chip->dev, "Unknown timestamp push\n");
+ atomic_inc(&ei->dropped_ts);
+ st = -EINVAL;
+ goto unlock;
+ }
+
+ ret = ei->cb(data, ei->cl_data);
+ if (ret == HTE_RUN_SECOND_CB && ei->tcb) {
+ queue_work(system_unbound_wq, &ei->cb_work);
+ set_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ return st;
+}
+EXPORT_SYMBOL_GPL(hte_push_ts_ns);
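A provider-side sketch (editorial, not part of this patch) of pushing a timestamp from an interrupt handler. my_read_ts_ns() is a hypothetical hardware read, and tv_nsec is assumed to be the nanosecond field of struct hte_ts_data in <linux/hte.h>; the seq field is filled in by the subsystem as shown above.

static irqreturn_t my_provider_isr(int irq, void *data)
{
	struct hte_chip *my_chip = data;
	struct hte_ts_data el = { 0 };
	u32 xlated_id = 0;	/* line that fired, provider specific */

	el.tv_nsec = my_read_ts_ns();	/* hypothetical hardware read */
	hte_push_ts_ns(my_chip, xlated_id, &el);

	return IRQ_HANDLED;
}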
+
+static int hte_register_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+ u32 i;
+
+ if (!chip || !chip->dev || !chip->dev->of_node)
+ return -EINVAL;
+
+ if (!chip->ops || !chip->ops->request || !chip->ops->release) {
+ dev_err(chip->dev, "Driver needs to provide ops\n");
+ return -EINVAL;
+ }
+
+ gdev = kzalloc(struct_size(gdev, ei, chip->nlines), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->chip = chip;
+ chip->gdev = gdev;
+ gdev->nlines = chip->nlines;
+ gdev->sdev = chip->dev;
+
+ for (i = 0; i < chip->nlines; i++) {
+ gdev->ei[i].gdev = gdev;
+ mutex_init(&gdev->ei[i].req_mlock);
+ spin_lock_init(&gdev->ei[i].slock);
+ }
+
+ if (chip->dev->driver)
+ gdev->owner = chip->dev->driver->owner;
+ else
+ gdev->owner = THIS_MODULE;
+
+ of_node_get(chip->dev->of_node);
+
+ INIT_LIST_HEAD(&gdev->list);
+
+ spin_lock(&hte_lock);
+ list_add_tail(&gdev->list, &hte_devices);
+ spin_unlock(&hte_lock);
+
+ hte_chip_dbgfs_init(gdev);
+
+ dev_dbg(chip->dev, "Added hte chip\n");
+
+ return 0;
+}
+
+static int hte_unregister_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+
+ if (!chip)
+ return -EINVAL;
+
+ gdev = chip->gdev;
+
+ spin_lock(&hte_lock);
+ list_del(&gdev->list);
+ spin_unlock(&hte_lock);
+
+ gdev->chip = NULL;
+
+ of_node_put(chip->dev->of_node);
+ debugfs_remove_recursive(gdev->dbg_root);
+ kfree(gdev);
+
+ dev_dbg(chip->dev, "Removed hte chip\n");
+
+ return 0;
+}
+
+static void _hte_devm_unregister_chip(void *chip)
+{
+ hte_unregister_chip(chip);
+}
+
+/**
+ * devm_hte_register_chip() - Resource managed API to register HTE chip.
+ *
+ * It is used by the provider to register itself with the HTE subsystem.
+ * The unregistration is done automatically when the provider exits.
+ *
+ * @chip: the HTE chip to add to subsystem.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_hte_register_chip(struct hte_chip *chip)
+{
+ int err;
+
+ err = hte_register_chip(chip);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(chip->dev, _hte_devm_unregister_chip,
+ chip);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_register_chip);
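A provider registration sketch (editorial, not part of this patch), mirroring what the Tegra probe above does. my_ops and my_xlate_of are hypothetical; the ops table type is assumed to be struct hte_ops from <linux/hte.h> and must provide at least the request() and release() callbacks checked by hte_register_chip().

static int my_provider_probe(struct platform_device *pdev)
{
	struct hte_chip *chip;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->name = "my-hte";
	chip->dev = &pdev->dev;		/* must carry an of_node */
	chip->nlines = 32;
	chip->ops = &my_ops;		/* hypothetical ops table */
	chip->xlate_of = my_xlate_of;	/* hypothetical DT translation */
	chip->of_hte_n_cells = 1;

	/* Unregistered automatically when the provider is removed */
	return devm_hte_register_chip(chip);
}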
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9c1b3620775c..714d549b7b46 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -575,31 +575,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = hv_dev->driver_override;
- if (strlen(driver_override)) {
- hv_dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- hv_dev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index af00dca8d1ac..ee6ce92ab4c3 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1379,7 +1379,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (conn->child_dev) {
+ if (conn->child_dev && conn->child_dev->has_conns_grp) {
ret = coresight_make_links(csdev, conn,
conn->child_dev);
if (ret)
@@ -1571,6 +1571,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
+ bool registered = false;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
@@ -1591,7 +1592,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_free_csdev;
+ kfree(csdev);
+ goto err_out;
}
csdev->refcnt = refcnts;
@@ -1616,6 +1618,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ /*
+ * Make sure the device registration and the connection fixup
+ * are synchronised, so that we don't see uninitialised devices
+ * on the coresight bus while trying to resolve the connections.
+ */
+ mutex_lock(&coresight_mutex);
+
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
@@ -1623,7 +1632,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* All resources are free'd explicitly via
* coresight_device_release(), triggered from put_device().
*/
- goto err_out;
+ goto out_unlock;
}
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -1638,11 +1647,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* from put_device(), which is in turn called from
* function device_unregister().
*/
- goto err_out;
+ goto out_unlock;
}
}
-
- mutex_lock(&coresight_mutex);
+ /* Device is now registered */
+ registered = true;
ret = coresight_create_conns_sysfs_group(csdev);
if (!ret)
@@ -1652,16 +1661,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
if (!ret && cti_assoc_ops && cti_assoc_ops->add)
cti_assoc_ops->add(csdev);
+out_unlock:
mutex_unlock(&coresight_mutex);
- if (ret) {
+ /* Success */
+ if (!ret)
+ return csdev;
+
+ /* Unregister the device if needed */
+ if (registered) {
coresight_unregister(csdev);
return ERR_PTR(ret);
}
- return csdev;
-
-err_free_csdev:
- kfree(csdev);
err_out:
/* Cleanup the connection information */
coresight_release_platform_data(NULL, desc->pdata);
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 8845ec4b4402..1874df7c6a73 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self,
int cpu;
struct debug_drvdata *drvdata;
- mutex_lock(&debug_lock);
+ /* Bail out if we can't acquire the mutex or the functionality is off */
+ if (!mutex_trylock(&debug_lock))
+ return NOTIFY_DONE;
- /* Bail out if the functionality is disabled */
if (!debug_enable)
goto skip_dump;
@@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self,
skip_dump:
mutex_unlock(&debug_lock);
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 7d413ba8b823..d0ab9933472b 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -204,7 +204,7 @@ void etm_set_default(struct etm_config *config)
* set all bits in register 0x007, the ETMTECR2, to 0
* set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
*/
- config->enable_ctrl1 = BIT(24);
+ config->enable_ctrl1 = ETMTECR1_INC_EXC;
config->enable_ctrl2 = 0x0;
config->enable_event = ETM_HARD_WIRE_RES_A;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e8c7649f123e..68fcbf4ce7a8 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -474,7 +474,7 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->startstop_ctrl |= (1 << idx);
- config->enable_ctrl1 |= BIT(25);
+ config->enable_ctrl1 |= ETMTECR1_START_STOP;
spin_unlock(&drvdata->spinlock);
return size;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 7f416a12000e..87299e99dabb 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -443,7 +443,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
- config->ss_status[i] &= ~BIT(31);
+ config->ss_status[i] &= ~TRCSSCSRn_STATUS;
etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
if (etm4x_sspcicrn_present(drvdata, i))
@@ -633,7 +633,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
/* Go from generic option to ETMv4 specifics */
if (attr->config & BIT(ETM_OPT_CYCACC)) {
- config->cfg |= BIT(4);
+ config->cfg |= TRCCONFIGR_CCI;
/* TRM: Must program this for cycacc to work */
config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
}
@@ -653,14 +653,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
goto out;
/* bit[11], Global timestamp tracing bit */
- config->cfg |= BIT(11);
+ config->cfg |= TRCCONFIGR_TS;
}
/* Only trace contextID when runs in root PID namespace */
if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
task_is_in_init_pid_ns(current))
/* bit[6], Context ID tracing bit */
- config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ config->cfg |= TRCCONFIGR_CID;
/*
* If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
@@ -672,17 +672,15 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
ret = -EINVAL;
goto out;
}
-
/* Only trace virtual contextID when runs in root PID namespace */
if (task_is_in_init_pid_ns(current))
- config->cfg |= BIT(ETM4_CFG_BIT_VMID) |
- BIT(ETM4_CFG_BIT_VMID_OPT);
+ config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
}
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
- config->cfg |= BIT(12);
+ config->cfg |= TRCCONFIGR_RS;
/*
* Set any selected configuration and preset.
@@ -1097,107 +1095,67 @@ static void etm4_init_arch_data(void *info)
etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
- if (BMVAL(etmidr0, 1, 2) == 0b11)
- drvdata->instrp0 = true;
- else
- drvdata->instrp0 = false;
-
+ drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
/* TRCBB, bit[5] Branch broadcast tracing support bit */
- if (BMVAL(etmidr0, 5, 5))
- drvdata->trcbb = true;
- else
- drvdata->trcbb = false;
-
+ drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
/* TRCCOND, bit[6] Conditional instruction tracing support bit */
- if (BMVAL(etmidr0, 6, 6))
- drvdata->trccond = true;
- else
- drvdata->trccond = false;
-
+ drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
/* TRCCCI, bit[7] Cycle counting instruction bit */
- if (BMVAL(etmidr0, 7, 7))
- drvdata->trccci = true;
- else
- drvdata->trccci = false;
-
+ drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
/* RETSTACK, bit[9] Return stack bit */
- if (BMVAL(etmidr0, 9, 9))
- drvdata->retstack = true;
- else
- drvdata->retstack = false;
-
+ drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
/* NUMEVENT, bits[11:10] Number of events field */
- drvdata->nr_event = BMVAL(etmidr0, 10, 11);
+ drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
/* QSUPP, bits[16:15] Q element support field */
- drvdata->q_support = BMVAL(etmidr0, 15, 16);
+ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
/* TSSIZE, bits[28:24] Global timestamp size field */
- drvdata->ts_size = BMVAL(etmidr0, 24, 28);
+ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
/* maximum size of resources */
etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
- drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
+ drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
- drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
+ drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
- drvdata->ccsize = BMVAL(etmidr2, 25, 28);
+ drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);
etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
- drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
+ drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
- drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
- drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
-
+ drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
/*
* TRCERR, bit[24] whether a trace unit can trace a
* system error exception.
*/
- if (BMVAL(etmidr3, 24, 24))
- drvdata->trc_error = true;
- else
- drvdata->trc_error = false;
-
+ drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
- if (BMVAL(etmidr3, 25, 25))
- drvdata->syncpr = true;
- else
- drvdata->syncpr = false;
-
+ drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
/* STALLCTL, bit[26] is stall control implemented? */
- if (BMVAL(etmidr3, 26, 26))
- drvdata->stallctl = true;
- else
- drvdata->stallctl = false;
-
+ drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
/* SYSSTALL, bit[27] implementation can support stall control? */
- if (BMVAL(etmidr3, 27, 27))
- drvdata->sysstall = true;
- else
- drvdata->sysstall = false;
-
+ drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
/*
* NUMPROC - the number of PEs available for tracing, 5bits
* = TRCIDR3.bits[13:12]bits[30:28]
* bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
* bits[3:0] = TRCIDR3.bits[30:28]
*/
- drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
-
+ drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
+ FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
- if (BMVAL(etmidr3, 31, 31))
- drvdata->nooverflow = true;
- else
- drvdata->nooverflow = false;
+ drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);
/* number of resources trace unit supports */
etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
- drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
+ drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
- drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
+ drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
/*
* NUMRSPAIR, bits[19:16]
* The number of resource pairs conveyed by the HW starts at 0, i.e a
@@ -1208,7 +1166,7 @@ static void etm4_init_arch_data(void *info)
* the default TRUE and FALSE resource selectors are omitted.
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
- drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+ drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
/*
@@ -1216,45 +1174,39 @@ static void etm4_init_arch_data(void *info)
* comparator control for tracing. Read any status regs as these
* also contain RO capability data.
*/
- drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
- drvdata->numcidc = BMVAL(etmidr4, 24, 27);
+ drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
- drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
+ drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
- drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
+ drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
- drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
+ drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
- if (BMVAL(etmidr5, 22, 22))
- drvdata->atbtrig = true;
- else
- drvdata->atbtrig = false;
+ drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
/*
* LPOVERRIDE, bit[23] implementation supports
* low-power state override
*/
- if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
- drvdata->lpoverride = true;
- else
- drvdata->lpoverride = false;
+ drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
- drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
+ drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
- drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+ drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
etm4_cs_lock(drvdata, csa);
cpu_detect_trace_filtering(drvdata);
}
static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
- return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
+ return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}
/* Set ELx trace filter access in the TRCVICTLR register */
@@ -1280,7 +1232,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl = BIT(0);
+ config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
/* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
etm4_set_victlr_access(config);
@@ -1389,7 +1341,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
@@ -1493,7 +1445,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1521,7 +1473,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* etm4_disable_perf().
*/
if (filters->ssstatus)
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No include/exclude filtering for ViewInst */
config->viiectlr = 0x0;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 21687cc1e4e2..6ea8181816fc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -22,7 +22,7 @@ static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
* TRCACATRn.TYPE bit[1:0]: type of comparison
* the trace unit performs
*/
- if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+ if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
if (idx % 2 != 0)
return -EINVAL;
@@ -180,12 +180,12 @@ static ssize_t reset_store(struct device *dev,
/* Disable data tracing: do not trace load and store data transfers */
config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
- config->cfg &= ~(BIT(1) | BIT(2));
+ config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
/* Disable data value and data address tracing */
config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
ETM_MODE_DATA_TRACE_VAL);
- config->cfg &= ~(BIT(16) | BIT(17));
+ config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
/* Disable all events tracing */
config->eventctrl0 = 0x0;
@@ -206,11 +206,11 @@ static ssize_t reset_store(struct device *dev,
* started state. ARM recommends start-stop logic is set before
* each trace run.
*/
- config->vinst_ctrl = BIT(0);
+ config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
if (drvdata->nr_addr_cmp > 0) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
}
/* No address range filtering for ViewInst */
@@ -304,134 +304,134 @@ static ssize_t mode_store(struct device *dev,
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
- config->cfg &= ~(BIT(1) | BIT(2));
+ config->cfg &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
if (config->mode & ETM_MODE_LOAD)
/* 0b01 Trace load instructions as P0 instructions */
- config->cfg |= BIT(1);
+ config->cfg |= TRCCONFIGR_INSTP0_LOAD;
if (config->mode & ETM_MODE_STORE)
/* 0b10 Trace store instructions as P0 instructions */
- config->cfg |= BIT(2);
+ config->cfg |= TRCCONFIGR_INSTP0_STORE;
if (config->mode & ETM_MODE_LOAD_STORE)
/*
* 0b11 Trace load and store instructions
* as P0 instructions
*/
- config->cfg |= BIT(1) | BIT(2);
+ config->cfg |= TRCCONFIGR_INSTP0_LOAD_STORE;
}
/* bit[3], Branch broadcast mode */
if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
- config->cfg |= BIT(3);
+ config->cfg |= TRCCONFIGR_BB;
else
- config->cfg &= ~BIT(3);
+ config->cfg &= ~TRCCONFIGR_BB;
/* bit[4], Cycle counting instruction trace bit */
if ((config->mode & ETMv4_MODE_CYCACC) &&
(drvdata->trccci == true))
- config->cfg |= BIT(4);
+ config->cfg |= TRCCONFIGR_CCI;
else
- config->cfg &= ~BIT(4);
+ config->cfg &= ~TRCCONFIGR_CCI;
/* bit[6], Context ID tracing bit */
if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
- config->cfg |= BIT(6);
+ config->cfg |= TRCCONFIGR_CID;
else
- config->cfg &= ~BIT(6);
+ config->cfg &= ~TRCCONFIGR_CID;
if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
- config->cfg |= BIT(7);
+ config->cfg |= TRCCONFIGR_VMID;
else
- config->cfg &= ~BIT(7);
+ config->cfg &= ~TRCCONFIGR_VMID;
/* bits[10:8], Conditional instruction tracing bit */
mode = ETM_MODE_COND(config->mode);
if (drvdata->trccond == true) {
- config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
- config->cfg |= mode << 8;
+ config->cfg &= ~TRCCONFIGR_COND_MASK;
+ config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
}
/* bit[11], Global timestamp tracing bit */
if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
- config->cfg |= BIT(11);
+ config->cfg |= TRCCONFIGR_TS;
else
- config->cfg &= ~BIT(11);
+ config->cfg &= ~TRCCONFIGR_TS;
/* bit[12], Return stack enable bit */
if ((config->mode & ETM_MODE_RETURNSTACK) &&
(drvdata->retstack == true))
- config->cfg |= BIT(12);
+ config->cfg |= TRCCONFIGR_RS;
else
- config->cfg &= ~BIT(12);
+ config->cfg &= ~TRCCONFIGR_RS;
/* bits[14:13], Q element enable field */
mode = ETM_MODE_QELEM(config->mode);
/* start by clearing QE bits */
- config->cfg &= ~(BIT(13) | BIT(14));
+ config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
/*
* if supported, Q elements with instruction counts are enabled.
* Always set the low bit for any requested mode. Valid combos are
* 0b00, 0b01 and 0b11.
*/
if (mode && drvdata->q_support)
- config->cfg |= BIT(13);
+ config->cfg |= TRCCONFIGR_QE_W_COUNTS;
/*
* if supported, Q elements with and without instruction
* counts are enabled
*/
if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
- config->cfg |= BIT(14);
+ config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
(drvdata->atbtrig == true))
- config->eventctrl1 |= BIT(11);
+ config->eventctrl1 |= TRCEVENTCTL1R_ATB;
else
- config->eventctrl1 &= ~BIT(11);
+ config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
/* bit[12], Low-power state behavior override bit */
if ((config->mode & ETM_MODE_LPOVERRIDE) &&
(drvdata->lpoverride == true))
- config->eventctrl1 |= BIT(12);
+ config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
else
- config->eventctrl1 &= ~BIT(12);
+ config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
/* bit[8], Instruction stall bit */
if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
- config->stall_ctrl |= BIT(8);
+ config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
else
- config->stall_ctrl &= ~BIT(8);
+ config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
/* bit[10], Prioritize instruction trace bit */
if (config->mode & ETM_MODE_INSTPRIO)
- config->stall_ctrl |= BIT(10);
+ config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
else
- config->stall_ctrl &= ~BIT(10);
+ config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
/* bit[13], Trace overflow prevention bit */
if ((config->mode & ETM_MODE_NOOVERFLOW) &&
(drvdata->nooverflow == true))
- config->stall_ctrl |= BIT(13);
+ config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
else
- config->stall_ctrl &= ~BIT(13);
+ config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
/* bit[9] Start/stop logic control bit */
if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
else
- config->vinst_ctrl &= ~BIT(9);
+ config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
/* bit[10], Whether a trace unit must trace a Reset exception */
if (config->mode & ETM_MODE_TRACE_RESET)
- config->vinst_ctrl |= BIT(10);
+ config->vinst_ctrl |= TRCVICTLR_TRCRESET;
else
- config->vinst_ctrl &= ~BIT(10);
+ config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
/* bit[11], Whether a trace unit must trace a system error exception */
if ((config->mode & ETM_MODE_TRACE_ERR) &&
(drvdata->trc_error == true))
- config->vinst_ctrl |= BIT(11);
+ config->vinst_ctrl |= TRCVICTLR_TRCERR;
else
- config->vinst_ctrl &= ~BIT(11);
+ config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm4_config_trace_mode(config);
@@ -534,7 +534,7 @@ static ssize_t event_instren_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->eventctrl1, 0, 3);
+ val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -551,23 +551,28 @@ static ssize_t event_instren_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* start by clearing all instruction event enable bits */
- config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+ config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
switch (drvdata->nr_event) {
case 0x0:
/* generate Event element for event 1 */
- config->eventctrl1 |= val & BIT(1);
+ config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
break;
case 0x1:
/* generate Event element for event 1 and 2 */
- config->eventctrl1 |= val & (BIT(0) | BIT(1));
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
break;
case 0x2:
/* generate Event element for event 1, 2 and 3 */
- config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+ TRCEVENTCTL1R_INSTEN_1 |
+ TRCEVENTCTL1R_INSTEN_2);
break;
case 0x3:
/* generate Event element for all 4 events */
- config->eventctrl1 |= val & 0xF;
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+ TRCEVENTCTL1R_INSTEN_1 |
+ TRCEVENTCTL1R_INSTEN_2 |
+ TRCEVENTCTL1R_INSTEN_3);
break;
default:
break;
@@ -702,10 +707,10 @@ static ssize_t bb_ctrl_store(struct device *dev,
* individual range comparators. If include then at least 1
* range must be selected.
*/
- if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
+ if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
return -EINVAL;
- config->bb_ctrl = val & GENMASK(8, 0);
+ config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
@@ -718,7 +723,7 @@ static ssize_t event_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = config->vinst_ctrl & ETMv4_EVENT_MASK;
+ val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -734,9 +739,9 @@ static ssize_t event_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- val &= ETMv4_EVENT_MASK;
- config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
- config->vinst_ctrl |= val;
+ val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
+ config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -750,7 +755,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
+ val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -767,10 +772,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
- config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
- config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
+ config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -785,7 +790,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
+ val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -802,10 +807,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
- config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
- config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
+ config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -858,11 +863,11 @@ static ssize_t addr_instdatatype_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
- val = BMVAL(config->addr_acc[idx], 0, 1);
+ val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n",
- val == ETM_INSTR_ADDR ? "instr" :
- (val == ETM_DATA_LOAD_ADDR ? "data_load" :
- (val == ETM_DATA_STORE_ADDR ? "data_store" :
+ val == TRCACATRn_TYPE_ADDR ? "instr" :
+ (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
+ (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
"data_load_store")));
spin_unlock(&drvdata->spinlock);
return len;
@@ -886,7 +891,7 @@ static ssize_t addr_instdatatype_store(struct device *dev,
idx = config->addr_idx;
if (!strcmp(str, "instr"))
/* TYPE, bits[1:0] */
- config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+ config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1144,7 +1149,7 @@ static ssize_t addr_ctxtype_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* CONTEXTTYPE, bits[3:2] */
- val = BMVAL(config->addr_acc[idx], 2, 3);
+ val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
(val == ETM_CTX_CTXID ? "ctxid" :
(val == ETM_CTX_VMID ? "vmid" : "all")));
@@ -1170,18 +1175,18 @@ static ssize_t addr_ctxtype_store(struct device *dev,
idx = config->addr_idx;
if (!strcmp(str, "none"))
/* start by clearing context type bits */
- config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
else if (!strcmp(str, "ctxid")) {
/* 0b01 The trace unit performs a Context ID */
if (drvdata->numcidc) {
- config->addr_acc[idx] |= BIT(2);
- config->addr_acc[idx] &= ~BIT(3);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
}
} else if (!strcmp(str, "vmid")) {
/* 0b10 The trace unit performs a VMID */
if (drvdata->numvmidc) {
- config->addr_acc[idx] &= ~BIT(2);
- config->addr_acc[idx] |= BIT(3);
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
}
} else if (!strcmp(str, "all")) {
/*
@@ -1189,9 +1194,9 @@ static ssize_t addr_ctxtype_store(struct device *dev,
* comparison and a VMID
*/
if (drvdata->numcidc)
- config->addr_acc[idx] |= BIT(2);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
if (drvdata->numvmidc)
- config->addr_acc[idx] |= BIT(3);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
}
spin_unlock(&drvdata->spinlock);
return size;
@@ -1210,7 +1215,7 @@ static ssize_t addr_context_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* context ID comparator bits[6:4] */
- val = BMVAL(config->addr_acc[idx], 4, 6);
+ val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1235,8 +1240,8 @@ static ssize_t addr_context_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear context ID comparator bits[6:4] */
- config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
- config->addr_acc[idx] |= (val << 4);
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
+ config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1253,7 +1258,7 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
- val = BMVAL(config->addr_acc[idx], 8, 14);
+ val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1270,14 +1275,14 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val & ~((GENMASK(14, 8) >> 8)))
+ if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
- config->addr_acc[idx] &= ~(GENMASK(14, 8));
- config->addr_acc[idx] |= (val << 8);
+ config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
+ config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1721,8 +1726,11 @@ static ssize_t res_ctrl_store(struct device *dev,
/* For odd idx pair inversal bit is RES0 */
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
- val &= ~BIT(21);
- config->res_ctrl[idx] = val & GENMASK(21, 0);
+ val &= ~TRCRSCTLRn_PAIRINV;
+ config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
+ TRCRSCTLRn_INV |
+ TRCRSCTLRn_GROUP_MASK |
+ TRCRSCTLRn_SELECT_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1787,9 +1795,9 @@ static ssize_t sshot_ctrl_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
- config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
/* must clear bit 31 in related status register on programming */
- config->ss_status[idx] &= ~BIT(31);
+ config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1837,9 +1845,9 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
- config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
/* must clear bit 31 in related status register on programming */
- config->ss_status[idx] &= ~BIT(31);
+ config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
spin_unlock(&drvdata->spinlock);
return size;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 3c4d69b096ca..33869c1d20c3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -131,6 +131,104 @@
#define TRCRSR_TA BIT(12)
/*
+ * Bit positions of registers that are defined above, in the sysreg.h style
+ * of _MASK for multi bit fields and BIT() for single bits.
+ */
+#define TRCIDR0_INSTP0_MASK GENMASK(2, 1)
+#define TRCIDR0_TRCBB BIT(5)
+#define TRCIDR0_TRCCOND BIT(6)
+#define TRCIDR0_TRCCCI BIT(7)
+#define TRCIDR0_RETSTACK BIT(9)
+#define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10)
+#define TRCIDR0_QSUPP_MASK GENMASK(16, 15)
+#define TRCIDR0_TSSIZE_MASK GENMASK(28, 24)
+
+#define TRCIDR2_CIDSIZE_MASK GENMASK(9, 5)
+#define TRCIDR2_VMIDSIZE_MASK GENMASK(14, 10)
+#define TRCIDR2_CCSIZE_MASK GENMASK(28, 25)
+
+#define TRCIDR3_CCITMIN_MASK GENMASK(11, 0)
+#define TRCIDR3_EXLEVEL_S_MASK GENMASK(19, 16)
+#define TRCIDR3_EXLEVEL_NS_MASK GENMASK(23, 20)
+#define TRCIDR3_TRCERR BIT(24)
+#define TRCIDR3_SYNCPR BIT(25)
+#define TRCIDR3_STALLCTL BIT(26)
+#define TRCIDR3_SYSSTALL BIT(27)
+#define TRCIDR3_NUMPROC_LO_MASK GENMASK(30, 28)
+#define TRCIDR3_NUMPROC_HI_MASK GENMASK(13, 12)
+#define TRCIDR3_NOOVERFLOW BIT(31)
+
+#define TRCIDR4_NUMACPAIRS_MASK GENMASK(3, 0)
+#define TRCIDR4_NUMPC_MASK GENMASK(15, 12)
+#define TRCIDR4_NUMRSPAIR_MASK GENMASK(19, 16)
+#define TRCIDR4_NUMSSCC_MASK GENMASK(23, 20)
+#define TRCIDR4_NUMCIDC_MASK GENMASK(27, 24)
+#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
+
+#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
+#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
+#define TRCIDR5_ATBTRIG BIT(22)
+#define TRCIDR5_LPOVERRIDE BIT(23)
+#define TRCIDR5_NUMSEQSTATE_MASK GENMASK(27, 25)
+#define TRCIDR5_NUMCNTR_MASK GENMASK(30, 28)
+
+#define TRCCONFIGR_INSTP0_LOAD BIT(1)
+#define TRCCONFIGR_INSTP0_STORE BIT(2)
+#define TRCCONFIGR_INSTP0_LOAD_STORE (TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE)
+#define TRCCONFIGR_BB BIT(3)
+#define TRCCONFIGR_CCI BIT(4)
+#define TRCCONFIGR_CID BIT(6)
+#define TRCCONFIGR_VMID BIT(7)
+#define TRCCONFIGR_COND_MASK GENMASK(10, 8)
+#define TRCCONFIGR_TS BIT(11)
+#define TRCCONFIGR_RS BIT(12)
+#define TRCCONFIGR_QE_W_COUNTS BIT(13)
+#define TRCCONFIGR_QE_WO_COUNTS BIT(14)
+#define TRCCONFIGR_VMIDOPT BIT(15)
+#define TRCCONFIGR_DA BIT(16)
+#define TRCCONFIGR_DV BIT(17)
+
+#define TRCEVENTCTL1R_INSTEN_MASK GENMASK(3, 0)
+#define TRCEVENTCTL1R_INSTEN_0 BIT(0)
+#define TRCEVENTCTL1R_INSTEN_1 BIT(1)
+#define TRCEVENTCTL1R_INSTEN_2 BIT(2)
+#define TRCEVENTCTL1R_INSTEN_3 BIT(3)
+#define TRCEVENTCTL1R_ATB BIT(11)
+#define TRCEVENTCTL1R_LPOVERRIDE BIT(12)
+
+#define TRCSTALLCTLR_ISTALL BIT(8)
+#define TRCSTALLCTLR_INSTPRIORITY BIT(10)
+#define TRCSTALLCTLR_NOOVERFLOW BIT(13)
+
+#define TRCVICTLR_EVENT_MASK GENMASK(7, 0)
+#define TRCVICTLR_SSSTATUS BIT(9)
+#define TRCVICTLR_TRCRESET BIT(10)
+#define TRCVICTLR_TRCERR BIT(11)
+#define TRCVICTLR_EXLEVEL_MASK GENMASK(22, 16)
+#define TRCVICTLR_EXLEVEL_S_MASK GENMASK(19, 16)
+#define TRCVICTLR_EXLEVEL_NS_MASK GENMASK(22, 20)
+
+#define TRCACATRn_TYPE_MASK GENMASK(1, 0)
+#define TRCACATRn_CONTEXTTYPE_MASK GENMASK(3, 2)
+#define TRCACATRn_CONTEXTTYPE_CTXID BIT(2)
+#define TRCACATRn_CONTEXTTYPE_VMID BIT(3)
+#define TRCACATRn_CONTEXT_MASK GENMASK(6, 4)
+#define TRCACATRn_EXLEVEL_MASK GENMASK(14, 8)
+
+#define TRCSSCSRn_STATUS BIT(31)
+#define TRCSSCCRn_SAC_ARC_RST_MASK GENMASK(24, 0)
+
+#define TRCSSPCICRn_PC_MASK GENMASK(7, 0)
+
+#define TRCBBCTLR_MODE BIT(8)
+#define TRCBBCTLR_RANGE_MASK GENMASK(7, 0)
+
+#define TRCRSCTLRn_PAIRINV BIT(21)
+#define TRCRSCTLRn_INV BIT(20)
+#define TRCRSCTLRn_GROUP_MASK GENMASK(19, 16)
+#define TRCRSCTLRn_SELECT_MASK GENMASK(15, 0)
+
+/*
* System instructions to access ETM registers.
* See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
*/
@@ -630,23 +728,9 @@
#define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */
#define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */
-#define ETM_EXLEVEL_MASK (GENMASK(6, 0))
-#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0))
-#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4))
-
/* access level controls in TRCACATRn */
#define TRCACATR_EXLEVEL_SHIFT 8
-/* access level control in TRCVICTLR */
-#define TRCVICTLR_EXLEVEL_SHIFT 16
-#define TRCVICTLR_EXLEVEL_S_SHIFT 16
-#define TRCVICTLR_EXLEVEL_NS_SHIFT 20
-
-/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-
#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8
#define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
#define ETM_TRCIDR1_ARCH_MAJOR(x) \
@@ -986,10 +1070,10 @@ struct etmv4_drvdata {
/* Address comparator access types */
enum etm_addr_acctype {
- ETM_INSTR_ADDR,
- ETM_DATA_LOAD_ADDR,
- ETM_DATA_STORE_ADDR,
- ETM_DATA_LOAD_STORE_ADDR,
+ TRCACATRn_TYPE_ADDR,
+ TRCACATRn_TYPE_DATA_LOAD_ADDR,
+ TRCACATRn_TYPE_DATA_STORE_ADDR,
+ TRCACATRn_TYPE_DATA_LOAD_STORE_ADDR,
};
/* Address comparator context types */
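The coresight changes above replace open-coded BIT()/GENMASK()/BMVAL() arithmetic with named register-field macros and the linux/bitfield.h helpers. A minimal sketch of that pattern, not part of the patch, using a mask laid out like TRCACATRn_CONTEXT_MASK (bits [6:4]); the EXAMPLE_* names are illustrative only:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Same layout as TRCACATRn_CONTEXT_MASK in the hunk above: bits [6:4]. */
#define EXAMPLE_CONTEXT_MASK	GENMASK(6, 4)

/* Old style: open-coded shift and mask, as in the removed BMVAL(x, 4, 6). */
static inline unsigned long ctx_get_old(unsigned long acc)
{
	return (acc >> 4) & 0x7;
}

/* New style: FIELD_GET() derives the shift from the mask itself. */
static inline unsigned long ctx_get_new(unsigned long acc)
{
	return FIELD_GET(EXAMPLE_CONTEXT_MASK, acc);
}

/*
 * Updating the field: clear the mask, then shift the new value into place.
 * FIELD_PREP(EXAMPLE_CONTEXT_MASK, val) would be an equivalent way to build
 * the second operand.
 */
static inline unsigned long ctx_set(unsigned long acc, unsigned long val)
{
	acc &= ~EXAMPLE_CONTEXT_MASK;
	acc |= val << __bf_shf(EXAMPLE_CONTEXT_MASK);
	return acc;
}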
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index eac3f02662ae..b53f010f3e40 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -290,7 +290,6 @@ config DA311
config DMARD06
tristate "Domintech DMARD06 Digital Accelerometer Driver"
- depends on OF || COMPILE_TEST
depends on I2C
help
Say yes here to build support for the Domintech low-g tri-axial
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index e9c10c8c32f0..7561399daef3 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -18,7 +18,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/units.h>
@@ -745,10 +745,7 @@ int adxl355_core_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- /*
- * TODO: Would be good to move it to the generic version.
- */
- irq = of_irq_get_byname(dev->of_node, "DRDY");
+ irq = fwnode_irq_get_byname(dev_fwnode(dev), "DRDY");
if (irq > 0) {
ret = adxl355_probe_trigger(indio_dev, irq);
if (ret)
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 62960134ea19..0289ed8cf2c6 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1567,7 +1567,6 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
return ret;
ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&adxl367_buffer_ops,
adxl367_fifo_attributes);
if (ret)
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 7516d7dde1af..57e8a8350cd1 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1525,7 +1525,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
int ret = 0;
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
return 0;
mutex_lock(&data->mutex);
@@ -1557,7 +1557,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
return 0;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index 53ab6078cb7f..cb0246ca72f3 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -24,7 +24,7 @@
#define DMARD09_AXIS_Y 1
#define DMARD09_AXIS_Z 2
#define DMARD09_AXIS_X_OFFSET ((DMARD09_AXIS_X + 1) * 2)
-#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1 )* 2)
+#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1) * 2)
#define DMARD09_AXIS_Z_OFFSET ((DMARD09_AXIS_Z + 1) * 2)
struct dmard09_data {
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index a9d2f10d5d45..8874d6d61725 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -1217,7 +1217,6 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
return ret;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&fxls8962af_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index ec17e35e573e..b7b5af45429e 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -44,8 +44,8 @@ static const struct spi_device_id kxsd9_spi_id[] = {
MODULE_DEVICE_TABLE(spi, kxsd9_spi_id);
static const struct of_device_id kxsd9_of_match[] = {
- { .compatible = "kionix,kxsd9" },
- { },
+ { .compatible = "kionix,kxsd9" },
+ { }
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 9c02c681c84c..912a447e6310 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -166,6 +166,7 @@ static const struct mma8452_event_regs trans_ev_regs = {
/**
* struct mma_chip_info - chip specific data
+ * @name: part number of device reported via 'name' attr
* @chip_id: WHO_AM_I register's value
* @channels: struct iio_chan_spec matching the device's
* capabilities
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 83c81072511e..29a68a7d34cd 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1474,7 +1474,6 @@ static int sca3000_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&sca3000_ring_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c
index a1164b439f41..7ca9d0d543e0 100644
--- a/drivers/iio/accel/ssp_accel_sensor.c
+++ b/drivers/iio/accel/ssp_accel_sensor.c
@@ -113,7 +113,6 @@ static int ssp_accel_probe(struct platform_device *pdev)
indio_dev->available_scan_masks = ssp_accel_scan_mask;
ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ssp_accel_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 00e056c21bfc..5b0f54e33d9e 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -14,32 +14,6 @@
#include <linux/types.h>
#include <linux/iio/common/st_sensors.h>
-enum st_accel_type {
- LSM303DLH,
- LSM303DLHC,
- LIS3DH,
- LSM330D,
- LSM330DL,
- LSM330DLC,
- LIS331DLH,
- LSM303DL,
- LSM303DLM,
- LSM330,
- LSM303AGR,
- LIS2DH12,
- LIS3L02DQ,
- LNG2DM,
- H3LIS331DL,
- LIS331DL,
- LIS3LV02DL,
- LIS2DW12,
- LIS3DHH,
- LIS2DE12,
- LIS2HH12,
- SC7A20,
- ST_ACCEL_MAX,
-};
-
#define H3LIS331DL_ACCEL_DEV_NAME "h3lis331dl_accel"
#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
@@ -62,8 +36,10 @@ enum st_accel_type {
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
+#define LIS302DL_ACCEL_DEV_NAME "lis302dl"
#define SC7A20_ACCEL_DEV_NAME "sc7a20"
+
#ifdef CONFIG_IIO_BUFFER
int st_accel_allocate_ring(struct iio_dev *indio_dev);
int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 5c5da6fdb490..c8c8eb15c34e 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -444,6 +444,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
+ [1] = LIS302DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
.odr = {
@@ -1209,28 +1210,21 @@ read_error:
static int st_accel_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE: {
int gain;
gain = val * 1000000 + val2;
- err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, gain);
}
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 96adc4344f4a..45ee0ddc133c 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -108,6 +108,10 @@ static const struct of_device_id st_accel_of_match[] = {
.data = LIS2HH12_ACCEL_DEV_NAME,
},
{
+ .compatible = "st,lis302dl",
+ .data = LIS302DL_ACCEL_DEV_NAME,
+ },
+ {
.compatible = "silan,sc7a20",
.data = SC7A20_ACCEL_DEV_NAME,
},
@@ -146,6 +150,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
{ LIS2HH12_ACCEL_DEV_NAME },
+ { LIS302DL_ACCEL_DEV_NAME },
{ SC7A20_ACCEL_DEV_NAME },
{},
};
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 108b63d0146c..6c0917750288 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -92,6 +92,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3de",
.data = LIS3DE_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis302dl",
+ .data = LIS302DL_ACCEL_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -147,6 +151,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DHH_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
+ { LIS302DL_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 71ab0a06aa82..48ace7412874 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -910,7 +910,7 @@ config ROCKCHIP_SARADC
config RZG2L_ADC
tristate "Renesas RZ/G2L ADC driver"
- depends on ARCH_R9A07G044 || COMPILE_TEST
+ depends on ARCH_RZG2L || COMPILE_TEST
help
Say yes here to build support for the ADC found in Renesas
RZ/G2L family.
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index c47ead15f6e5..c5b785d8b241 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -43,6 +43,8 @@
#define AD7124_STATUS_POR_FLAG_MSK BIT(4)
/* AD7124_ADC_CONTROL */
+#define AD7124_ADC_STATUS_EN_MSK BIT(10)
+#define AD7124_ADC_STATUS_EN(x) FIELD_PREP(AD7124_ADC_STATUS_EN_MSK, x)
#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8)
#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
#define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6)
@@ -188,7 +190,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
.sign = 'u',
.realbits = 24,
.storagebits = 32,
- .shift = 8,
.endianness = IIO_BE,
},
};
@@ -501,26 +502,70 @@ static int ad7124_prepare_read(struct ad7124_state *st, int address)
return ad7124_enable_channel(st, &st->channels[address]);
}
+static int __ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+ return ad7124_prepare_read(st, channel);
+}
+
static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
int ret;
mutex_lock(&st->cfgs_lock);
- ret = ad7124_prepare_read(st, channel);
+ ret = __ad7124_set_channel(sd, channel);
mutex_unlock(&st->cfgs_lock);
return ret;
}
+static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ unsigned int adc_control = st->adc_control;
+ int ret;
+
+ adc_control &= ~AD7124_ADC_STATUS_EN_MSK;
+ adc_control |= AD7124_ADC_STATUS_EN(append);
+
+ ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, adc_control);
+ if (ret < 0)
+ return ret;
+
+ st->adc_control = adc_control;
+
+ return 0;
+}
+
+static int ad7124_disable_all(struct ad_sigma_delta *sd)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < st->num_channels; i++) {
+ ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.set_channel = ad7124_set_channel,
+ .append_status = ad7124_append_status,
+ .disable_all = ad7124_disable_all,
.set_mode = ad7124_set_mode,
.has_registers = true,
.addr_shift = 0,
.read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
.data_reg = AD7124_DATA,
- .irq_flags = IRQF_TRIGGER_FALLING
+ .num_slots = 8,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7124_read_raw(struct iio_dev *indio_dev,
@@ -670,11 +715,40 @@ static const struct attribute_group ad7124_attrs_group = {
.attrs = ad7124_attributes,
};
+static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+ bool bit_set;
+ int ret;
+ int i;
+
+ mutex_lock(&st->cfgs_lock);
+ for (i = 0; i < st->num_channels; i++) {
+ bit_set = test_bit(i, scan_mask);
+ if (bit_set)
+ ret = __ad7124_set_channel(&st->sd, i);
+ else
+ ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK,
+ 0, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->cfgs_lock);
+
+ return ret;
+ }
+ }
+
+ mutex_unlock(&st->cfgs_lock);
+
+ return 0;
+}
+
static const struct iio_info ad7124_info = {
.read_raw = ad7124_read_raw,
.write_raw = ad7124_write_raw,
.debugfs_reg_access = &ad7124_reg_access,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7124_update_scan_mode,
.attrs = &ad7124_attrs_group,
};
@@ -886,12 +960,14 @@ static int ad7124_probe(struct spi_device *spi)
st->chip_info = info;
- ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
-
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7124_info;
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
+ if (ret < 0)
+ return ret;
+
ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 770b4e59238f..d71977be7d22 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -58,7 +58,8 @@
/* Mode Register Bit Designations (AD7192_REG_MODE) */
#define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */
#define AD7192_MODE_SEL_MASK (0x7 << 21) /* Operation Mode Select Mask */
-#define AD7192_MODE_DAT_STA BIT(20) /* Status Register transmission */
+#define AD7192_MODE_STA(x) (((x) & 0x1) << 20) /* Status Register transmission */
+#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
@@ -225,7 +226,7 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
bool sys_calib;
int ret, temp;
- ret = strtobool(buf, &sys_calib);
+ ret = kstrtobool(buf, &sys_calib);
if (ret)
return ret;
@@ -288,12 +289,51 @@ static int ad7192_set_mode(struct ad_sigma_delta *sd,
return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
}
+static int ad7192_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+ unsigned int mode = st->mode;
+ int ret;
+
+ mode &= ~AD7192_MODE_STA_MASK;
+ mode |= AD7192_MODE_STA(append);
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, mode);
+ if (ret < 0)
+ return ret;
+
+ st->mode = mode;
+
+ return 0;
+}
+
+static int ad7192_disable_all(struct ad_sigma_delta *sd)
+{
+ struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+ u32 conf = st->conf;
+ int ret;
+
+ conf &= ~AD7192_CONF_CHAN_MASK;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+ if (ret < 0)
+ return ret;
+
+ st->conf = conf;
+
+ return 0;
+}
+
static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.set_channel = ad7192_set_channel,
+ .append_status = ad7192_append_status,
+ .disable_all = ad7192_disable_all,
.set_mode = ad7192_set_mode,
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
+ .num_slots = 4,
.irq_flags = IRQF_TRIGGER_FALLING,
};
@@ -457,7 +497,7 @@ static ssize_t ad7192_set(struct device *dev,
int ret;
bool val;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
@@ -783,6 +823,26 @@ static int ad7192_read_avail(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ u32 conf = st->conf;
+ int ret;
+ int i;
+
+ conf &= ~AD7192_CONF_CHAN_MASK;
+ for_each_set_bit(i, scan_mask, 8)
+ conf |= AD7192_CONF_CHAN(i);
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+ if (ret < 0)
+ return ret;
+
+ st->conf = conf;
+
+ return 0;
+}
+
static const struct iio_info ad7192_info = {
.read_raw = ad7192_read_raw,
.write_raw = ad7192_write_raw,
@@ -790,6 +850,7 @@ static const struct iio_info ad7192_info = {
.read_avail = ad7192_read_avail,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7192_update_scan_mode,
};
static const struct iio_info ad7195_info = {
@@ -799,6 +860,7 @@ static const struct iio_info ad7195_info = {
.read_avail = ad7192_read_avail,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7192_update_scan_mode,
};
#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c17d9b5fbaf6..f20d39f0bc01 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -378,6 +378,11 @@ static const char * const ad7266_gpio_labels[] = {
"ad0", "ad1", "ad2",
};
+static void ad7266_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7266_probe(struct spi_device *spi)
{
struct ad7266_platform_data *pdata = spi->dev.platform_data;
@@ -398,9 +403,13 @@ static int ad7266_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7266_reg_disable, st->reg);
+ if (ret)
+ return ret;
+
ret = regulator_get_voltage(st->reg);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
st->vref_mv = ret / 1000;
} else {
@@ -423,7 +432,7 @@ static int ad7266_probe(struct spi_device *spi)
GPIOD_OUT_LOW);
if (IS_ERR(st->gpios[i])) {
ret = PTR_ERR(st->gpios[i]);
- goto error_disable_reg;
+ return ret;
}
}
}
@@ -433,7 +442,6 @@ static int ad7266_probe(struct spi_device *spi)
st->mode = AD7266_MODE_DIFF;
}
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
@@ -459,35 +467,12 @@ static int ad7266_probe(struct spi_device *spi)
spi_message_add_tail(&st->single_xfer[1], &st->single_msg);
spi_message_add_tail(&st->single_xfer[2], &st->single_msg);
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, &iio_pollfunc_store_time,
&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_buffer_cleanup;
-
- return 0;
-
-error_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static void ad7266_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7266_state *st = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7266_id[] = {
@@ -502,7 +487,6 @@ static struct spi_driver ad7266_driver = {
.name = "ad7266",
},
.probe = ad7266_probe,
- .remove = ad7266_remove,
.id_table = ad7266_id,
};
module_spi_driver(ad7266_driver);
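The ad7266 conversion above drops the remove() callback entirely: the regulator disable is registered as a device-managed action, and the buffer and IIO device registration use their devm_ variants, so teardown ordering is left to the driver core. A minimal sketch of the devm action pattern under illustrative names (the "vref" supply and example_* identifiers are assumptions, not taken from the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_reg_disable(void *reg)
{
	/* Runs automatically on probe failure or device unbind. */
	regulator_disable(reg);
}

static int example_enable_vref(struct device *dev)
{
	struct regulator *vref;
	int ret;

	vref = devm_regulator_get(dev, "vref");		/* hypothetical supply name */
	if (IS_ERR(vref))
		return PTR_ERR(vref);

	ret = regulator_enable(vref);
	if (ret)
		return ret;

	/* Pair the enable with a managed disable; no remove() needed. */
	return devm_add_action_or_reset(dev, example_reg_disable, vref);
}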
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index ec9acbf12b9a..3bdf3d9422f2 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -488,7 +488,7 @@ static ssize_t ad7280_store_balance_sw(struct iio_dev *indio_dev,
bool readin;
int ret;
- ret = strtobool(buf, &readin);
+ ret = kstrtobool(buf, &readin);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index ebcd52526cac..261a9a6b45e1 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/align.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -342,15 +343,49 @@ EXPORT_SYMBOL_NS_GPL(ad_sigma_delta_single_conversion, IIO_AD_SIGMA_DELTA);
static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned int i, slot, samples_buf_size;
unsigned int channel;
+ uint8_t *samples_buf;
int ret;
- channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
- ret = ad_sigma_delta_set_channel(sigma_delta,
- indio_dev->channels[channel].address);
- if (ret)
- return ret;
+ if (sigma_delta->num_slots == 1) {
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ ret = ad_sigma_delta_set_channel(sigma_delta,
+ indio_dev->channels[channel].address);
+ if (ret)
+ return ret;
+ slot = 1;
+ } else {
+ /*
+ * At this point update_scan_mode already enabled the required channels.
+ * For sigma-delta sequencer drivers with multiple slots, an update_scan_mode
+ * implementation is mandatory.
+ */
+ slot = 0;
+ for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+ sigma_delta->slots[slot] = indio_dev->channels[i].address;
+ slot++;
+ }
+ }
+
+ sigma_delta->active_slots = slot;
+ sigma_delta->current_slot = 0;
+
+ if (sigma_delta->active_slots > 1) {
+ ret = ad_sigma_delta_append_status(sigma_delta, true);
+ if (ret)
+ return ret;
+ }
+
+ samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8);
+ samples_buf_size += sizeof(int64_t);
+ samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf,
+ samples_buf_size, GFP_KERNEL);
+ if (!samples_buf)
+ return -ENOMEM;
+
+ sigma_delta->samples_buf = samples_buf;
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
@@ -386,6 +421,10 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ if (sigma_delta->status_appended)
+ ad_sigma_delta_append_status(sigma_delta, false);
+
+ ad_sigma_delta_disable_all(sigma_delta);
sigma_delta->bus_locked = false;
return spi_bus_unlock(sigma_delta->spi->master);
}
@@ -396,6 +435,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
uint8_t *data = sigma_delta->rx_buf;
+ unsigned int transfer_size;
+ unsigned int sample_size;
+ unsigned int sample_pos;
+ unsigned int status_pos;
unsigned int reg_size;
unsigned int data_reg;
@@ -408,21 +451,69 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
else
data_reg = AD_SD_REG_DATA;
+ /* Status word will be appended to the sample during transfer */
+ if (sigma_delta->status_appended)
+ transfer_size = reg_size + 1;
+ else
+ transfer_size = reg_size;
+
switch (reg_size) {
case 4:
case 2:
case 1:
- ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[0]);
+ status_pos = reg_size;
+ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[0]);
break;
case 3:
+ /*
+ * Data array after transfer will look like (if status is appended):
+ * data[] = { [0][sample][sample][sample][status] }
+ * Keeping the first byte 0 shifts the status position by 1 byte to the right.
+ */
+ status_pos = reg_size + 1;
+
/* We store 24 bit samples in a 32 bit word. Keep the upper
* byte set to zero. */
- ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[1]);
+ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
break;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+ /*
+ * For devices sampling only one channel at a time,
+ * there is no need for sample number tracking.
+ */
+ if (sigma_delta->active_slots == 1) {
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+ goto irq_handled;
+ }
+
+ if (sigma_delta->status_appended) {
+ u8 converted_channel;
+
+ converted_channel = data[status_pos] & sigma_delta->info->status_ch_mask;
+ if (converted_channel != sigma_delta->slots[sigma_delta->current_slot]) {
+ /*
+ * Desync occurred during continuous sampling of multiple channels.
+ * Drop this incomplete sample and start from first channel again.
+ * Drop this incomplete sample and start from the first channel again.
+
+ sigma_delta->current_slot = 0;
+ goto irq_handled;
+ }
+ }
+
+ sample_size = indio_dev->channels[0].scan_type.storagebits / 8;
+ sample_pos = sample_size * sigma_delta->current_slot;
+ memcpy(&sigma_delta->samples_buf[sample_pos], data, sample_size);
+ sigma_delta->current_slot++;
+ if (sigma_delta->current_slot == sigma_delta->active_slots) {
+ sigma_delta->current_slot = 0;
+ iio_push_to_buffers_with_timestamp(indio_dev, sigma_delta->samples_buf,
+ pf->timestamp);
+ }
+
+irq_handled:
iio_trigger_notify_done(indio_dev->trig);
sigma_delta->irq_dis = false;
enable_irq(sigma_delta->spi->irq);
@@ -430,10 +521,17 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
+static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned long *mask)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+ return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+}
+
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
.postenable = &ad_sd_buffer_postenable,
.postdisable = &ad_sd_buffer_postdisable,
- .validate_scan_mask = &iio_validate_scan_mask_onehot,
+ .validate_scan_mask = &ad_sd_validate_scan_mask,
};
static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
@@ -513,8 +611,14 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
*/
int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev)
{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
int ret;
+ sigma_delta->slots = devm_kcalloc(dev, sigma_delta->num_slots,
+ sizeof(*sigma_delta->slots), GFP_KERNEL);
+ if (!sigma_delta->slots)
+ return -ENOMEM;
+
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
&ad_sd_trigger_handler,
@@ -541,6 +645,25 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
{
sigma_delta->spi = spi;
sigma_delta->info = info;
+
+ /* If the field is unset in ad_sigma_delta_info, assume there can only be 1 slot. */
+ if (!info->num_slots)
+ sigma_delta->num_slots = 1;
+ else
+ sigma_delta->num_slots = info->num_slots;
+
+ if (sigma_delta->num_slots > 1) {
+ if (!indio_dev->info->update_scan_mode) {
+ dev_err(&spi->dev, "iio_dev lacks update_scan_mode().\n");
+ return -EINVAL;
+ }
+
+ if (!info->disable_all) {
+ dev_err(&spi->dev, "ad_sigma_delta_info lacks disable_all().\n");
+ return -EINVAL;
+ }
+ }
+
iio_device_set_drvdata(indio_dev, sigma_delta);
return 0;
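The sequencer support added to ad_sigma_delta above relies on an optional status byte appended to every sample: its low bits (info->status_ch_mask) report which channel the conversion came from, so the trigger handler can detect desynchronization and restart the scan instead of pushing misaligned data. A simplified, hedged model of that per-sample bookkeeping (no SPI transfers, buffers or locking; the structure and function below are illustrative, not kernel API):

#include <linux/types.h>

struct example_seq_state {
	unsigned int slots[8];		/* channel address per active scan slot */
	unsigned int active_slots;
	unsigned int current_slot;
	u8 status_ch_mask;		/* e.g. GENMASK(3, 0) on AD7124/AD7192 */
};

/* Returns true when a complete set of samples is ready to be pushed. */
static bool example_account_sample(struct example_seq_state *s, u8 status)
{
	u8 channel = status & s->status_ch_mask;

	if (channel != s->slots[s->current_slot]) {
		/* Desync: drop the partial scan and start from the first slot. */
		s->current_slot = 0;
		return false;
	}

	/* Sample belongs to the expected slot; store it, then advance. */
	if (++s->current_slot == s->active_slots) {
		s->current_slot = 0;
		return true;	/* caller pushes samples_buf with the timestamp */
	}
	return false;
}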
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 854b1f81d807..b764823ce57e 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1117,7 +1117,7 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
return at91_adc_configure_touch(st, true);
/* if we are not in triggered mode, we cannot enable the buffer. */
- if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
/* we continue with the triggered buffer */
@@ -1159,7 +1159,7 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
return at91_adc_configure_touch(st, false);
/* if we are not in triggered mode, nothing to do here */
- if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
/*
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 8d902a32a0fd..abad16803849 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
bool val;
int ret;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret)
return ret;
@@ -1027,7 +1027,6 @@ static int ina2xx_probe(struct i2c_client *client,
indio_dev->name = id->name;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ina2xx_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 61e80bf3d05e..fd000345ec5c 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -376,7 +376,8 @@ static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
adc->adc_info[adc_chan].gain_error;
if (val < 0) {
- dev_err(adc->dev, "Mismatch with calibration\n");
+ if (val < -10)
+ dev_err(adc->dev, "Mismatch with calibration var = %d\n", val);
return 0;
}
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index 00098caf6d9e..e9ff2d6a8a57 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -9,12 +9,16 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* PMIC global registers definition */
-#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
+#define SC2731_MODULE_EN 0xc08
#define SC27XX_MODULE_ADC_EN BIT(5)
-#define SC27XX_ARM_CLK_EN 0xc10
+#define SC2721_ARM_CLK_EN 0xc0c
+#define SC2730_ARM_CLK_EN 0x180c
+#define SC2731_ARM_CLK_EN 0xc10
#define SC27XX_CLK_ADC_EN BIT(5)
#define SC27XX_CLK_ADC_CLK_EN BIT(6)
@@ -36,8 +40,10 @@
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT 8
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
+#define SC2721_ADC_SCALE_MASK BIT(5)
+#define SC27XX_ADC_SCALE_SHIFT 9
+#define SC2721_ADC_SCALE_SHIFT 5
/* Bits definitions for SC27XX_ADC_INT_EN registers */
#define SC27XX_ADC_IRQ_EN BIT(0)
@@ -67,8 +73,15 @@
#define SC27XX_RATIO_NUMERATOR_OFFSET 16
#define SC27XX_RATIO_DENOMINATOR_MASK GENMASK(15, 0)
+/* ADC specific channel reference voltage 3.5V */
+#define SC27XX_ADC_REFVOL_VDD35 3500000
+
+/* ADC default channel reference voltage is 2.8V */
+#define SC27XX_ADC_REFVOL_VDD28 2800000
+
struct sc27xx_adc_data {
struct device *dev;
+ struct regulator *volref;
struct regmap *regmap;
/*
* One hardware spinlock to synchronize between the multiple
@@ -78,6 +91,24 @@ struct sc27xx_adc_data {
int channel_scale[SC27XX_ADC_CHANNEL_MAX];
u32 base;
int irq;
+ const struct sc27xx_adc_variant_data *var_data;
+};
+
+/*
+ * Since different PMICs in the SC27xx series can have different
+ * register addresses and ratios, save the ratio config and base
+ * addresses in the device data structure.
+ */
+struct sc27xx_adc_variant_data {
+ u32 module_en;
+ u32 clk_en;
+ u32 scale_shift;
+ u32 scale_mask;
+ const struct sc27xx_adc_linear_graph *bscale_cal;
+ const struct sc27xx_adc_linear_graph *sscale_cal;
+ void (*init_scale)(struct sc27xx_adc_data *data);
+ int (*get_ratio)(int channel, int scale);
+ bool set_volref;
};
struct sc27xx_adc_linear_graph {
@@ -103,6 +134,16 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
100, 341,
};
+static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
+ 4200, 850,
+ 3600, 728,
+};
+
+static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
+ 1000, 838,
+ 100, 84,
+};
+
static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
4200, 856,
3600, 733,
@@ -118,49 +159,225 @@ static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
return ((calib_data & 0xff) + calib_adc - 128) * 4;
}
+/* get the adc nvmem cell calibration data */
+static int adc_nvmem_cell_calib_data(struct sc27xx_adc_data *data, const char *cell_name)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ u32 origin_calib_data = 0;
+ size_t len;
+
+ if (!data)
+ return -EINVAL;
+
+ cell = nvmem_cell_get(data->dev, cell_name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+
+ memcpy(&origin_calib_data, buf, min(len, sizeof(u32)));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return origin_calib_data;
+}
+
static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
bool big_scale)
{
const struct sc27xx_adc_linear_graph *calib_graph;
struct sc27xx_adc_linear_graph *graph;
- struct nvmem_cell *cell;
const char *cell_name;
u32 calib_data = 0;
- void *buf;
- size_t len;
if (big_scale) {
- calib_graph = &big_scale_graph_calib;
+ calib_graph = data->var_data->bscale_cal;
graph = &big_scale_graph;
cell_name = "big_scale_calib";
} else {
- calib_graph = &small_scale_graph_calib;
+ calib_graph = data->var_data->sscale_cal;
graph = &small_scale_graph;
cell_name = "small_scale_calib";
}
- cell = nvmem_cell_get(data->dev, cell_name);
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- buf = nvmem_cell_read(cell, &len);
- nvmem_cell_put(cell);
-
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- memcpy(&calib_data, buf, min(len, sizeof(u32)));
+ calib_data = adc_nvmem_cell_calib_data(data, cell_name);
/* Only need to calibrate the adc values in the linear graph. */
graph->adc0 = sc27xx_adc_get_calib_data(calib_data, calib_graph->adc0);
graph->adc1 = sc27xx_adc_get_calib_data(calib_data >> 8,
calib_graph->adc1);
- kfree(buf);
return 0;
}
-static int sc27xx_adc_get_ratio(int channel, int scale)
+static int sc2720_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 14:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 1:
+ return SC27XX_VOLT_RATIO(68, 1760);
+ case 2:
+ return SC27XX_VOLT_RATIO(68, 2327);
+ case 3:
+ return SC27XX_VOLT_RATIO(68, 3654);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 16:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 1:
+ return SC27XX_VOLT_RATIO(480, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(480, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(48, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 21:
+ case 22:
+ case 23:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(3, 8);
+ case 1:
+ return SC27XX_VOLT_RATIO(375, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(375, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(300, 3248);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ default:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 1);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(1000, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(100, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2721_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return scale ? SC27XX_VOLT_RATIO(400, 1025) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 5:
+ return SC27XX_VOLT_RATIO(7, 29);
+ case 7:
+ case 9:
+ return scale ? SC27XX_VOLT_RATIO(100, 125) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 14:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 16:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 19:
+ return SC27XX_VOLT_RATIO(1, 3);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2730_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 14:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 1:
+ return SC27XX_VOLT_RATIO(68, 1760);
+ case 2:
+ return SC27XX_VOLT_RATIO(68, 2327);
+ case 3:
+ return SC27XX_VOLT_RATIO(68, 3654);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 15:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 3);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 5865);
+ case 2:
+ return SC27XX_VOLT_RATIO(500, 3879);
+ case 3:
+ return SC27XX_VOLT_RATIO(500, 6090);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 16:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 1:
+ return SC27XX_VOLT_RATIO(480, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(480, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(48, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 21:
+ case 22:
+ case 23:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(3, 8);
+ case 1:
+ return SC27XX_VOLT_RATIO(375, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(375, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(300, 3248);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ default:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 1);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(1000, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(1000, 4060);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2731_adc_get_ratio(int channel, int scale)
{
switch (channel) {
case 1:
@@ -185,10 +402,87 @@ static int sc27xx_adc_get_ratio(int channel, int scale)
return SC27XX_VOLT_RATIO(1, 1);
}
+/*
+ * According to the datasheet, set a specific scale value on some channels.
+ */
+static void sc2720_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ data->channel_scale[i] = 3;
+ break;
+ case 7:
+ case 9:
+ data->channel_scale[i] = 2;
+ break;
+ case 13:
+ data->channel_scale[i] = 1;
+ break;
+ case 19:
+ case 30:
+ case 31:
+ data->channel_scale[i] = 3;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
+static void sc2730_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ case 10:
+ case 19:
+ case 30:
+ case 31:
+ data->channel_scale[i] = 3;
+ break;
+ case 7:
+ case 9:
+ data->channel_scale[i] = 2;
+ break;
+ case 13:
+ data->channel_scale[i] = 1;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
+static void sc2731_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+ /*
+ * In the current software design, SC2731 support 2 scales,
+ * In the current software design, the SC2731 supports 2 scales:
+ * channel 5 uses the big scale, the others use the small scale.
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ data->channel_scale[i] = 1;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
int scale, int *val)
{
- int ret;
+ int ret, ret_volref;
u32 tmp, value, status;
ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
@@ -197,10 +491,25 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
return ret;
}
+ /*
+ * According to the SC2721 chip data sheet, the reference voltage of
+ * channels 30 and 31 in the ADC module needs to be set from the
+ * default 2.8 V to 3.5 V.
+ */
+ if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+ ret = regulator_set_voltage(data->volref,
+ SC27XX_ADC_REFVOL_VDD35,
+ SC27XX_ADC_REFVOL_VDD35);
+ if (ret) {
+ dev_err(data->dev, "failed to set the volref 3.5v\n");
+ goto unlock_adc;
+ }
+ }
+
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
SC27XX_ADC_EN, SC27XX_ADC_EN);
if (ret)
- goto unlock_adc;
+ goto regulator_restore;
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
@@ -208,10 +517,11 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
goto disable_adc;
/* Configure the channel id and scale */
- tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
+ tmp = (scale << data->var_data->scale_shift) & data->var_data->scale_mask;
tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
- SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
+ SC27XX_ADC_CHN_ID_MASK |
+ data->var_data->scale_mask,
tmp);
if (ret)
goto disable_adc;
@@ -249,6 +559,17 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
disable_adc:
regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
SC27XX_ADC_EN, 0);
+regulator_restore:
+ if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+ ret_volref = regulator_set_voltage(data->volref,
+ SC27XX_ADC_REFVOL_VDD28,
+ SC27XX_ADC_REFVOL_VDD28);
+ if (ret_volref) {
+ dev_err(data->dev, "failed to set the volref 2.8v, ret_volref = 0x%x\n",
+ ret_volref);
+ ret = ret || ret_volref;
+ }
+ }
unlock_adc:
hwspin_unlock_raw(data->hwlock);
@@ -262,13 +583,14 @@ static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
int channel, int scale,
u32 *div_numerator, u32 *div_denominator)
{
- u32 ratio = sc27xx_adc_get_ratio(channel, scale);
+ u32 ratio;
+ ratio = data->var_data->get_ratio(channel, scale);
*div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
*div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
}
-static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+static int adc_to_volt(struct sc27xx_adc_linear_graph *graph,
int raw_adc)
{
int tmp;
@@ -277,6 +599,16 @@ static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
tmp /= (graph->adc0 - graph->adc1);
tmp += graph->volt1;
+ return tmp;
+}
+
+static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+ int raw_adc)
+{
+ int tmp;
+
+ tmp = adc_to_volt(graph, raw_adc);
+
return tmp < 0 ? 0 : tmp;
}
@@ -432,13 +764,13 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
{
int ret;
- ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ ret = regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
if (ret)
return ret;
/* Enable ADC work clock and controller clock */
- ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ ret = regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
if (ret)
@@ -456,10 +788,10 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
return 0;
disable_clk:
- regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
disable_adc:
- regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, 0);
return ret;
@@ -470,21 +802,76 @@ static void sc27xx_adc_disable(void *_data)
struct sc27xx_adc_data *data = _data;
/* Disable ADC work clock and controller clock */
- regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
- regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, 0);
}
+static const struct sc27xx_adc_variant_data sc2731_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2731_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &sc2731_big_scale_graph_calib,
+ .sscale_cal = &sc2731_small_scale_graph_calib,
+ .init_scale = sc2731_adc_scale_init,
+ .get_ratio = sc2731_adc_get_ratio,
+ .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2730_data = {
+ .module_en = SC2730_MODULE_EN,
+ .clk_en = SC2730_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &big_scale_graph_calib,
+ .sscale_cal = &small_scale_graph_calib,
+ .init_scale = sc2730_adc_scale_init,
+ .get_ratio = sc2730_adc_get_ratio,
+ .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2721_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2721_ARM_CLK_EN,
+ .scale_shift = SC2721_ADC_SCALE_SHIFT,
+ .scale_mask = SC2721_ADC_SCALE_MASK,
+ .bscale_cal = &sc2731_big_scale_graph_calib,
+ .sscale_cal = &sc2731_small_scale_graph_calib,
+ .init_scale = sc2731_adc_scale_init,
+ .get_ratio = sc2721_adc_get_ratio,
+ .set_volref = true,
+};
+
+static const struct sc27xx_adc_variant_data sc2720_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2721_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &big_scale_graph_calib,
+ .sscale_cal = &small_scale_graph_calib,
+ .init_scale = sc2720_adc_scale_init,
+ .get_ratio = sc2720_adc_get_ratio,
+ .set_volref = false,
+};
+
static int sc27xx_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sc27xx_adc_data *sc27xx_data;
+ const struct sc27xx_adc_variant_data *pdata;
struct iio_dev *indio_dev;
int ret;
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*sc27xx_data));
if (!indio_dev)
return -ENOMEM;
@@ -520,6 +907,16 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
}
sc27xx_data->dev = dev;
+ if (pdata->set_volref) {
+ sc27xx_data->volref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(sc27xx_data->volref)) {
+ ret = PTR_ERR(sc27xx_data->volref);
+ return dev_err_probe(dev, ret, "failed to get ADC volref\n");
+ }
+ }
+
+ sc27xx_data->var_data = pdata;
+ sc27xx_data->var_data->init_scale(sc27xx_data);
ret = sc27xx_adc_enable(sc27xx_data);
if (ret) {
@@ -546,7 +943,10 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
}
static const struct of_device_id sc27xx_adc_of_match[] = {
- { .compatible = "sprd,sc2731-adc", },
+ { .compatible = "sprd,sc2731-adc", .data = &sc2731_data},
+ { .compatible = "sprd,sc2730-adc", .data = &sc2730_data},
+ { .compatible = "sprd,sc2721-adc", .data = &sc2721_data},
+ { .compatible = "sprd,sc2720-adc", .data = &sc2720_data},
{ }
};
MODULE_DEVICE_TABLE(of, sc27xx_adc_of_match);
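The sc27xx rework above keys every PMIC-specific difference (enable-register offsets, scale-field layout, calibration graphs, ratio tables, vref handling) off a per-compatible variant structure resolved in probe. A condensed sketch of that lookup pattern; the "vendor,example-v1" compatible and example_* names are hypothetical:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct example_variant_data {
	u32 module_en;					/* per-PMIC register offset */
	int (*get_ratio)(int channel, int scale);	/* per-PMIC ratio table */
};

static const struct example_variant_data example_v1_data = {
	.module_en = 0xc08,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_variant_data *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	/* All later register accesses go through variant->module_en etc. */
	return 0;
}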
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 9704cf0b9753..6d21ea84fa82 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -466,8 +466,7 @@ static int stm32_dfsdm_channels_configure(struct iio_dev *indio_dev,
* In continuous mode, use fast mode configuration,
* if it provides a better resolution.
*/
- if (adc->nconv == 1 && !trig &&
- (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)) {
+ if (adc->nconv == 1 && !trig && iio_buffer_enabled(indio_dev)) {
if (fl->flo[1].res >= fl->flo[0].res) {
fl->fast = 1;
flo = &fl->flo[1];
@@ -562,7 +561,7 @@ static int stm32_dfsdm_filter_configure(struct iio_dev *indio_dev,
cr1 = DFSDM_CR1_RCH(chan->channel);
/* Continuous conversions triggered by SPI clk in buffer mode */
- if (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)
+ if (iio_buffer_enabled(indio_dev))
cr1 |= DFSDM_CR1_RCONT(1);
cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index d2d405388499..000e5cfecb43 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -61,7 +61,7 @@ struct stmpe_adc {
static int stmpe_read_voltage(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
STMPE_ADC_CH(info->channel));
mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
static int stmpe_read_temp(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
mutex_unlock(&info->lock);
return -ETIMEDOUT;
}
@@ -345,21 +345,22 @@ static int __maybe_unused stmpe_adc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
+static const struct of_device_id stmpe_adc_ids[] = {
+ { .compatible = "st,stmpe-adc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
+
static struct platform_driver stmpe_adc_driver = {
.probe = stmpe_adc_probe,
.driver = {
.name = "stmpe-adc",
.pm = &stmpe_adc_pm_ops,
+ .of_match_table = stmpe_adc_ids,
},
};
module_platform_driver(stmpe_adc_driver);
-static const struct of_device_id stmpe_adc_ids[] = {
- { .compatible = "st,stmpe-adc", },
- { },
-};
-MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
-
MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
MODULE_DESCRIPTION("STMPEXXX ADC driver");
MODULE_LICENSE("GPL v2");
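The stmpe-adc type change reflects the return convention of wait_for_completion_timeout(): it returns an unsigned long holding the remaining jiffies, or 0 on timeout, so the value can never be negative and the old "ret <= 0" test only ever acted on the "== 0" case. A minimal sketch of the corrected pattern (the 100 ms timeout is illustrative):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_for_sample(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
	if (left == 0)
		return -ETIMEDOUT;	/* no jiffies left: the wait timed out */

	return 0;
}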
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 068efbce1710..5544da80b636 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -76,10 +76,15 @@
#define ADS1015_DEFAULT_DATA_RATE 4
#define ADS1015_DEFAULT_CHAN 0
-enum chip_ids {
- ADSXXXX = 0,
- ADS1015,
- ADS1115,
+struct ads1015_chip_data {
+ struct iio_chan_spec const *channels;
+ int num_channels;
+ const struct iio_info *info;
+ const int *data_rate;
+ const int data_rate_len;
+ const int *scale;
+ const int scale_len;
+ bool has_comparator;
};
enum ads1015_channels {
@@ -94,11 +99,11 @@ enum ads1015_channels {
ADS1015_TIMESTAMP,
};
-static const unsigned int ads1015_data_rate[] = {
+static const int ads1015_data_rate[] = {
128, 250, 490, 920, 1600, 2400, 3300, 3300
};
-static const unsigned int ads1115_data_rate[] = {
+static const int ads1115_data_rate[] = {
8, 16, 32, 64, 128, 250, 475, 860
};
@@ -106,10 +111,28 @@ static const unsigned int ads1115_data_rate[] = {
* Translation from PGA bits to full-scale positive and negative input voltage
* range in mV
*/
-static int ads1015_fullscale_range[] = {
+static const int ads1015_fullscale_range[] = {
6144, 4096, 2048, 1024, 512, 256, 256, 256
};
+static const int ads1015_scale[] = { /* 12bit ADC */
+ 256, 11,
+ 512, 11,
+ 1024, 11,
+ 2048, 11,
+ 4096, 11,
+ 6144, 11
+};
+
+static const int ads1115_scale[] = { /* 16bit ADC */
+ 256, 15,
+ 512, 15,
+ 1024, 15,
+ 2048, 15,
+ 4096, 15,
+ 6144, 15
+};
+
/*
* Translation from COMP_QUE field value to the number of successive readings
* exceed the threshold values before an interrupt is generated
@@ -134,71 +157,53 @@ static const struct iio_event_spec ads1015_events[] = {
},
};
-#define ADS1015_V_CHAN(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .address = _addr, \
- .channel = _chan, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = _addr, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- .shift = 4, \
- .endianness = IIO_CPU, \
- }, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
- .datasheet_name = "AIN"#_chan, \
-}
-
-#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+/*
+ * Compile-time check whether _fitbits can accommodate up to _testbits
+ * bits. Returns _fitbits on success, fails to compile otherwise.
+ *
+ * The test works such that it multiplies constant _fitbits by constant
+ * double-negation of size of a non-empty structure, i.e. it multiplies
+ * constant _fitbits by constant 1 in each successful compilation case.
+ * The non-empty structure may contain C11 _Static_assert(), make use of
+ * this and place the kernel variant of static assert in there, so that
+ * it performs the compile-time check for _testbits <= _fitbits. Note
+ * that it is not possible to directly use static_assert in compound
+ * statements, hence this convoluted construct.
+ */
+#define FIT_CHECK(_testbits, _fitbits) \
+ ( \
+ (_fitbits) * \
+ !!sizeof(struct { \
+ static_assert((_testbits) <= (_fitbits)); \
+ int pad; \
+ }) \
+ )
+
+#define ADS1015_V_CHAN(_chan, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
.type = IIO_VOLTAGE, \
- .differential = 1, \
.indexed = 1, \
.address = _addr, \
.channel = _chan, \
- .channel2 = _chan2, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = _addr, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- .shift = 4, \
- .endianness = IIO_CPU, \
- }, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
- .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
-}
-
-#define ADS1115_V_CHAN(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .address = _addr, \
- .channel = _chan, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ .info_mask_shared_by_all_available = \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = _addr, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
+ .realbits = (_realbits), \
+ .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \
+ .shift = (_shift), \
.endianness = IIO_CPU, \
}, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
+ .event_spec = (_event_spec), \
+ .num_event_specs = (_num_event_specs), \
.datasheet_name = "AIN"#_chan, \
}
-#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
.type = IIO_VOLTAGE, \
.differential = 1, \
.indexed = 1, \
@@ -208,15 +213,19 @@ static const struct iio_event_spec ads1015_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = _addr, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
+ .realbits = (_realbits), \
+ .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \
+ .shift = (_shift), \
.endianness = IIO_CPU, \
}, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
+ .event_spec = (_event_spec), \
+ .num_event_specs = (_num_event_specs), \
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
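Editor's note: the FIT_CHECK() macro introduced above can also be exercised outside the kernel; the following stand-alone C11 sketch (not part of the patch) shows the same "static assertion inside a sizeof'd struct" trick, which keeps the whole check usable in a constant expression such as an initializer:

#include <assert.h>
#include <stdio.h>

#define FIT_CHECK(_testbits, _fitbits)                                  \
        ((_fitbits) *                                                   \
         !!sizeof(struct {                                              \
                static_assert((_testbits) <= (_fitbits), "too small");  \
                int pad;                                                \
         }))

int main(void)
{
        int storagebits = FIT_CHECK(12 + 4, 16);  /* 16 <= 16: compiles, yields 16 */
        /* int bad = FIT_CHECK(13 + 4, 16); */    /* 17 > 16: would not compile */

        printf("%d\n", storagebits);
        return 0;
}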
@@ -245,7 +254,7 @@ struct ads1015_data {
unsigned int comp_mode;
struct ads1015_thresh_data thresh_data[ADS1015_CHANNELS];
- unsigned int *data_rate;
+ const struct ads1015_chip_data *chip;
/*
* Set to true when the ADC is switched to the continuous-conversion
* mode and exits from a power-down state. This flag is used to avoid
@@ -273,49 +282,91 @@ static void ads1015_event_channel_disable(struct ads1015_data *data, int chan)
data->event_channel = ADS1015_CHANNELS;
}
-static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case ADS1015_CFG_REG:
- case ADS1015_LO_THRESH_REG:
- case ADS1015_HI_THRESH_REG:
- return true;
- default:
- return false;
- }
-}
+static const struct regmap_range ads1015_writeable_ranges[] = {
+ regmap_reg_range(ADS1015_CFG_REG, ADS1015_HI_THRESH_REG),
+};
+
+static const struct regmap_access_table ads1015_writeable_table = {
+ .yes_ranges = ads1015_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ads1015_writeable_ranges),
+};
static const struct regmap_config ads1015_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = ADS1015_HI_THRESH_REG,
- .writeable_reg = ads1015_is_writeable_reg,
+ .wr_table = &ads1015_writeable_table,
+};
+
+static const struct regmap_range tla2024_writeable_ranges[] = {
+ regmap_reg_range(ADS1015_CFG_REG, ADS1015_CFG_REG),
+};
+
+static const struct regmap_access_table tla2024_writeable_table = {
+ .yes_ranges = tla2024_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tla2024_writeable_ranges),
+};
+
+static const struct regmap_config tla2024_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADS1015_CFG_REG,
+ .wr_table = &tla2024_writeable_table,
};
static const struct iio_chan_spec ads1015_channels[] = {
- ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
- ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
- ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
- ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
- ADS1015_V_CHAN(0, ADS1015_AIN0),
- ADS1015_V_CHAN(1, ADS1015_AIN1),
- ADS1015_V_CHAN(2, ADS1015_AIN2),
- ADS1015_V_CHAN(3, ADS1015_AIN3),
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
static const struct iio_chan_spec ads1115_channels[] = {
- ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
- ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
- ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
- ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
- ADS1115_V_CHAN(0, ADS1015_AIN0),
- ADS1115_V_CHAN(1, ADS1015_AIN1),
- ADS1115_V_CHAN(2, ADS1015_AIN2),
- ADS1115_V_CHAN(3, ADS1015_AIN3),
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+static const struct iio_chan_spec tla2024_channels[] = {
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4, NULL, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
#ifdef CONFIG_PM
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
@@ -344,6 +395,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
+ const int *data_rate = data->chip->data_rate;
int ret, pga, dr, dr_old, conv_time;
unsigned int old, mask, cfg;
@@ -378,8 +430,8 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
}
if (data->conv_invalid) {
dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT;
- conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
- conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr_old]);
+ conv_time += DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr]);
conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
usleep_range(conv_time, conv_time + 1);
data->conv_invalid = false;
@@ -445,8 +497,8 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
- if (data->data_rate[i] == rate) {
+ for (i = 0; i < data->chip->data_rate_len; i++) {
+ if (data->chip->data_rate[i] == rate) {
data->channel_data[chan].data_rate = i;
return 0;
}
@@ -455,6 +507,32 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
return -EINVAL;
}
+static int ads1015_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_FRACTIONAL_LOG2;
+ *vals = data->chip->scale;
+ *length = data->chip->scale_len;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT;
+ *vals = data->chip->data_rate;
+ *length = data->chip->data_rate_len;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ads1015_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -504,7 +582,7 @@ release_direct:
break;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
- *val = data->data_rate[idx];
+ *val = data->chip->data_rate[idx];
ret = IIO_VAL_INT;
break;
default:
@@ -564,7 +642,7 @@ static int ads1015_read_event(struct iio_dev *indio_dev,
dr = data->channel_data[chan->address].data_rate;
comp_queue = data->thresh_data[chan->address].comp_queue;
period = ads1015_comp_queue[comp_queue] *
- USEC_PER_SEC / data->data_rate[dr];
+ USEC_PER_SEC / data->chip->data_rate[dr];
*val = period / USEC_PER_SEC;
*val2 = period % USEC_PER_SEC;
@@ -586,6 +664,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
int val2)
{
struct ads1015_data *data = iio_priv(indio_dev);
+ const int *data_rate = data->chip->data_rate;
int realbits = chan->scan_type.realbits;
int ret = 0;
long long period;
@@ -611,7 +690,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(ads1015_comp_queue) - 1; i++) {
if (period <= ads1015_comp_queue[i] *
- USEC_PER_SEC / data->data_rate[dr])
+ USEC_PER_SEC / data_rate[dr])
break;
}
data->thresh_data[chan->address].comp_queue = i;
@@ -802,54 +881,20 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
.validate_scan_mask = &iio_validate_scan_mask_onehot,
};
-static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
- "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
- "0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
-
-static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
- sampling_frequency_available, "128 250 490 920 1600 2400 3300");
-static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
- sampling_frequency_available, "8 16 32 64 128 250 475 860");
-
-static struct attribute *ads1015_attributes[] = {
- &iio_const_attr_ads1015_scale_available.dev_attr.attr,
- &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ads1015_attribute_group = {
- .attrs = ads1015_attributes,
-};
-
-static struct attribute *ads1115_attributes[] = {
- &iio_const_attr_ads1115_scale_available.dev_attr.attr,
- &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ads1115_attribute_group = {
- .attrs = ads1115_attributes,
-};
-
static const struct iio_info ads1015_info = {
+ .read_avail = ads1015_read_avail,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
.write_event_value = ads1015_write_event,
.read_event_config = ads1015_read_event_config,
.write_event_config = ads1015_write_event_config,
- .attrs = &ads1015_attribute_group,
};
-static const struct iio_info ads1115_info = {
+static const struct iio_info tla2024_info = {
+ .read_avail = ads1015_read_avail,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
- .read_event_value = ads1015_read_event,
- .write_event_value = ads1015_write_event,
- .read_event_config = ads1015_read_event_config,
- .write_event_config = ads1015_write_event_config,
- .attrs = &ads1115_attribute_group,
};
static int ads1015_client_get_channels_config(struct i2c_client *client)
@@ -932,12 +977,18 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode)
static int ads1015_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ads1015_chip_data *chip;
struct iio_dev *indio_dev;
struct ads1015_data *data;
int ret;
- enum chip_ids chip;
int i;
+ chip = device_get_match_data(&client->dev);
+ if (!chip)
+ chip = (const struct ads1015_chip_data *)id->driver_data;
+ if (!chip)
+ return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n");
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -950,28 +1001,12 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- chip = (uintptr_t)device_get_match_data(&client->dev);
- if (chip == ADSXXXX)
- chip = id->driver_data;
- switch (chip) {
- case ADS1015:
- indio_dev->channels = ads1015_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
- indio_dev->info = &ads1015_info;
- data->data_rate = (unsigned int *) &ads1015_data_rate;
- break;
- case ADS1115:
- indio_dev->channels = ads1115_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
- indio_dev->info = &ads1115_info;
- data->data_rate = (unsigned int *) &ads1115_data_rate;
- break;
- default:
- dev_err(&client->dev, "Unknown chip %d\n", chip);
- return -EINVAL;
- }
-
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->info = chip->info;
+ data->chip = chip;
data->event_channel = ADS1015_CHANNELS;
+
/*
* Set default lower and upper threshold to min and max value
* respectively.
@@ -986,7 +1021,9 @@ static int ads1015_probe(struct i2c_client *client,
/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
ads1015_get_channels_config(client);
- data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+ data->regmap = devm_regmap_init_i2c(client, chip->has_comparator ?
+ &ads1015_regmap_config :
+ &tla2024_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "Failed to allocate register map\n");
return PTR_ERR(data->regmap);
@@ -1000,7 +1037,7 @@ static int ads1015_probe(struct i2c_client *client,
return ret;
}
- if (client->irq) {
+ if (client->irq && chip->has_comparator) {
unsigned long irq_trig =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
unsigned int cfg_comp_mask = ADS1015_CFG_COMP_QUE_MASK |
@@ -1099,22 +1136,51 @@ static const struct dev_pm_ops ads1015_pm_ops = {
ads1015_runtime_resume, NULL)
};
+static const struct ads1015_chip_data ads1015_data = {
+ .channels = ads1015_channels,
+ .num_channels = ARRAY_SIZE(ads1015_channels),
+ .info = &ads1015_info,
+ .data_rate = ads1015_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1015_data_rate),
+ .scale = ads1015_scale,
+ .scale_len = ARRAY_SIZE(ads1015_scale),
+ .has_comparator = true,
+};
+
+static const struct ads1015_chip_data ads1115_data = {
+ .channels = ads1115_channels,
+ .num_channels = ARRAY_SIZE(ads1115_channels),
+ .info = &ads1015_info,
+ .data_rate = ads1115_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1115_data_rate),
+ .scale = ads1115_scale,
+ .scale_len = ARRAY_SIZE(ads1115_scale),
+ .has_comparator = true,
+};
+
+static const struct ads1015_chip_data tla2024_data = {
+ .channels = tla2024_channels,
+ .num_channels = ARRAY_SIZE(tla2024_channels),
+ .info = &tla2024_info,
+ .data_rate = ads1015_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1015_data_rate),
+ .scale = ads1015_scale,
+ .scale_len = ARRAY_SIZE(ads1015_scale),
+ .has_comparator = false,
+};
+
static const struct i2c_device_id ads1015_id[] = {
- {"ads1015", ADS1015},
- {"ads1115", ADS1115},
+ { "ads1015", (kernel_ulong_t)&ads1015_data },
+ { "ads1115", (kernel_ulong_t)&ads1115_data },
+ { "tla2024", (kernel_ulong_t)&tla2024_data },
{}
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
static const struct of_device_id ads1015_of_match[] = {
- {
- .compatible = "ti,ads1015",
- .data = (void *)ADS1015
- },
- {
- .compatible = "ti,ads1115",
- .data = (void *)ADS1115
- },
+ { .compatible = "ti,ads1015", .data = &ads1015_data },
+ { .compatible = "ti,ads1115", .data = &ads1115_data },
+ { .compatible = "ti,tla2024", .data = &tla2024_data },
{}
};
MODULE_DEVICE_TABLE(of, ads1015_of_match);
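Editor's note: the read_avail() lists added above replace the removed const attributes without changing what user space sees. Each IIO_VAL_FRACTIONAL_LOG2 pair is read as val / 2^val2: for the 12-bit tables, 6144/2^11 = 3, 4096/2^11 = 2, 2048/2^11 = 1, 1024/2^11 = 0.5, 512/2^11 = 0.25 and 256/2^11 = 0.125, matching the old "3 2 1 0.5 0.25 0.125" string, while for the 16-bit ADS1115 the same mV values over 2^15 give 0.1875 down to roughly 0.007813, matching the old ADS1115 string.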
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 22c2583eedd0..708cca0a63be 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -508,6 +508,7 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match);
static struct spi_driver ads8688_driver = {
.driver = {
.name = "ads8688",
+ .of_match_table = ads8688_of_match,
},
.probe = ads8688_probe,
.remove = ads8688_remove,
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index dbdc1ef48566..567d43a30955 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -376,9 +376,7 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
{
int ret;
- ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
- setup_ops);
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/afe/Kconfig b/drivers/iio/afe/Kconfig
index 4fa397822cff..9a1d95c1c7ed 100644
--- a/drivers/iio/afe/Kconfig
+++ b/drivers/iio/afe/Kconfig
@@ -8,7 +8,6 @@ menu "Analog Front Ends"
config IIO_RESCALE
tristate "IIO rescale"
- depends on OF || COMPILE_TEST
help
Say yes here to build support for the IIO rescaling
that handles voltage dividers, current sense shunts and
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 7e511293d6d1..c6cf709f0f05 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -10,9 +10,8 @@
#include <linux/err.h>
#include <linux/gcd.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -536,7 +535,7 @@ static int rescale_probe(struct platform_device *pdev)
rescale = iio_priv(indio_dev);
- rescale->cfg = of_device_get_match_data(dev);
+ rescale->cfg = device_get_match_data(dev);
rescale->numerator = 1;
rescale->denominator = 1;
rescale->offset = 0;
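Editor's note: the rescale change above is representative of several conversions in this series: device_get_match_data() resolves match data through the generic fwnode layer, so it works for DT and ACPI alike and the OF-only includes and Kconfig dependencies can be dropped. A hedged sketch of the probe-side pattern (the "foo" driver and its config struct are hypothetical):

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_cfg {
        int gain;
};

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_cfg *cfg = device_get_match_data(&pdev->dev);

        if (!cfg)
                return -EINVAL;         /* no match data from DT or ACPI */

        dev_info(&pdev->dev, "gain %d\n", cfg->gain);
        return 0;
}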
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 416d35a61ae2..35d8b4077376 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -259,8 +259,6 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
* devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device
* @dev: Device object to which to attach the life-time of this kfifo buffer
* @indio_dev: The device the buffer should be attached to
- * @mode_flags: The mode flags for this buffer (INDIO_BUFFER_SOFTWARE and/or
- * INDIO_BUFFER_TRIGGERED).
* @setup_ops: The setup_ops required to configure the HW part of the buffer (optional)
* @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer
*
@@ -271,22 +269,16 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
*/
int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
- int mode_flags,
const struct iio_buffer_setup_ops *setup_ops,
const struct attribute **buffer_attrs)
{
struct iio_buffer *buffer;
- if (!mode_flags)
- return -EINVAL;
-
buffer = devm_iio_kfifo_allocate(dev);
if (!buffer)
return -ENOMEM;
- mode_flags &= kfifo_access_funcs.modes;
-
- indio_dev->modes |= mode_flags;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
indio_dev->setup_ops = setup_ops;
buffer->attrs = buffer_attrs;
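Editor's note: after the kfifo_buf change above, callers no longer pass mode flags; the helper always marks the device INDIO_BUFFER_SOFTWARE. A minimal sketch of a converted caller (hypothetical names, empty setup ops shown only for shape):

#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>

static const struct iio_buffer_setup_ops foo_buffer_ops = {
        /* .postenable / .predisable hooks as needed */
};

static int foo_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
        return devm_iio_kfifo_buffer_setup(dev, indio_dev, &foo_buffer_ops);
}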
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index b2725c6adc7f..5976aca48e3b 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -333,8 +333,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
 * We cannot use a trigger here, as events are generated
* as soon as sample_frequency is set.
*/
- ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE, NULL,
+ ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL,
cros_ec_sensor_fifo_attributes);
if (ret)
return ret;
@@ -413,7 +412,7 @@ static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
int ret, i;
bool calibrate;
- ret = strtobool(buf, &calibrate);
+ ret = kstrtobool(buf, &calibrate);
if (ret < 0)
return ret;
if (!calibrate)
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index d538bf3ab1ef..793d628db55f 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -686,7 +686,6 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev,
scmi_iio_dev,
- INDIO_BUFFER_SOFTWARE,
&scmi_iio_buffer_ops);
if (err < 0) {
dev_err(dev,
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 769bd9280524..f32b04b63ea1 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -331,12 +331,11 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
/* threaded irq */
int ssp_irq_msg(struct ssp_data *data)
{
- bool found = false;
char *buffer;
u8 msg_type;
int ret;
u16 length, msg_options;
- struct ssp_msg *msg, *n;
+ struct ssp_msg *msg = NULL, *iter, *n;
ret = spi_read(data->spi, data->header_buffer, SSP_HEADER_BUFFER_SIZE);
if (ret < 0) {
@@ -362,15 +361,15 @@ int ssp_irq_msg(struct ssp_data *data)
* received with no order
*/
mutex_lock(&data->pending_lock);
- list_for_each_entry_safe(msg, n, &data->pending_list, list) {
- if (msg->options == msg_options) {
- list_del(&msg->list);
- found = true;
+ list_for_each_entry_safe(iter, n, &data->pending_list, list) {
+ if (iter->options == msg_options) {
+ list_del(&iter->list);
+ msg = iter;
break;
}
}
- if (!found) {
+ if (!msg) {
/*
* here can be implemented dead messages handling
* but the slave should not send such ones - it is to
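Editor's note: the ssp_spi rework above follows the "don't use the list iterator after the loop" pattern: a dedicated iterator walks the list and the hit is published through a separate pointer that stays NULL when nothing matches. A self-contained sketch of that pattern with hypothetical types:

#include <linux/list.h>
#include <linux/types.h>

struct foo_msg {
        u16 options;
        struct list_head list;
};

static struct foo_msg *foo_find_and_unlink(struct list_head *pending, u16 options)
{
        struct foo_msg *msg = NULL, *iter, *n;

        list_for_each_entry_safe(iter, n, pending, list) {
                if (iter->options == options) {
                        list_del(&iter->list);
                        msg = iter;
                        break;
                }
        }

        return msg;     /* NULL when no pending message matched */
}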
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index fa9bcdf0d190..9910ba1da085 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -71,16 +71,18 @@ st_sensors_match_odr_error:
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
- int err;
+ int err = 0;
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ mutex_lock(&sdata->odr_lock);
+
if (!sdata->sensor_settings->odr.mask)
- return 0;
+ goto unlock_mutex;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
- goto st_sensors_match_odr_error;
+ goto unlock_mutex;
if ((sdata->sensor_settings->odr.addr ==
sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
if (err >= 0)
sdata->odr = odr_out.hz;
-st_sensors_match_odr_error:
+unlock_mutex:
+ mutex_unlock(&sdata->odr_lock);
+
return err;
}
EXPORT_SYMBOL_NS(st_sensors_set_odr, IIO_ST_SENSORS);
@@ -361,6 +365,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *of_pdata;
int err = 0;
+ mutex_init(&sdata->odr_lock);
+
 /* If OF/DT pdata exists, it will take precedence over anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (IS_ERR(of_pdata))
@@ -549,26 +555,28 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
- err = -EBUSY;
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ mutex_lock(&sdata->odr_lock);
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
goto out;
- } else {
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto out;
- msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
- err = st_sensors_read_axis_data(indio_dev, ch, val);
- if (err < 0)
- goto out;
+ msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
+ err = st_sensors_read_axis_data(indio_dev, ch, val);
+ if (err < 0)
+ goto out;
- *val = *val >> ch->scan_type.shift;
+ *val = *val >> ch->scan_type.shift;
+
+ err = st_sensors_set_enable(indio_dev, false);
- err = st_sensors_set_enable(indio_dev, false);
- }
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&sdata->odr_lock);
+ iio_device_release_direct_mode(indio_dev);
return err;
}
@@ -641,7 +649,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
break;
@@ -649,7 +656,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
sdata->sensor_settings->odr.odr_avl[i].hz);
}
- mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
return len;
@@ -663,7 +669,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
@@ -673,7 +678,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
- mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
return len;
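Editor's note: the st_sensors_core change above stops poking at indio_dev->mlock and currentmode and instead claims direct mode, with a driver-private mutex guarding against concurrent ODR updates. A hedged sketch of that raw-read shape (struct and helper names are illustrative, not the driver's):

#include <linux/iio/iio.h>
#include <linux/mutex.h>

struct foo_data {
        struct mutex lock;              /* serializes ODR changes vs. raw reads */
};

static int foo_read_hw(struct foo_data *data, int *val)
{
        *val = 0;                       /* stand-in for the real register read */
        return 0;
}

static int foo_read_raw_locked(struct iio_dev *indio_dev, int *val)
{
        struct foo_data *data = iio_priv(indio_dev);
        int ret;

        ret = iio_device_claim_direct_mode(indio_dev);
        if (ret)
                return ret;             /* a buffer is currently running */

        mutex_lock(&data->lock);
        ret = foo_read_hw(data, val);
        mutex_unlock(&data->lock);

        iio_device_release_direct_mode(indio_dev);
        return ret;
}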
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index c0bf0d84197f..d1c7bde8aece 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -285,7 +285,6 @@ config CIO_DAC
config DPOT_DAC
tristate "DAC emulation using a DPOT"
- depends on OF
help
Say yes here to build support for DAC emulation using a digital
potentiometer.
@@ -305,7 +304,7 @@ config DS4424
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
help
Say yes here to build support for NXP LPC18XX DAC.
@@ -442,7 +441,6 @@ config TI_DAC7612
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
- depends on OF
depends on HAS_IOMEM
help
Say yes here to support Vybrid board digital-to-analog converter.
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 27ee2c63c5d4..d87cf14daabe 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -288,7 +288,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
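Editor's note: the many strtobool() -> kstrtobool() hunks in this series are mechanical; strtobool() is just an alias slated for removal, and kstrtobool() (declared in <linux/kstrtox.h>) keeps the same semantics, accepting "0"/"1", "y"/"n" and "on"/"off" style input. A hypothetical sysfs store callback using it:

#include <linux/device.h>
#include <linux/kstrtox.h>

static ssize_t foo_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        bool enable;
        int ret;

        ret = kstrtobool(buf, &enable);
        if (ret)
                return ret;

        /* ... apply "enable" to the hardware here ... */
        return len;
}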
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index ecbc6a51d60f..22b000a40828 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -284,7 +284,7 @@ static ssize_t ad5360_write_dac_powerdown(struct device *dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 82e1d9bd773e..a44c83242fb1 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -96,7 +96,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index fdf824041497..09e242949cd0 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -114,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 8507573aa13e..a0817e799cc0 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -182,7 +182,7 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5504_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 371e812850eb..7e6f824de299 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -129,7 +129,7 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5624r_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index f78dd3f33199..15361d8bbf94 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -73,7 +73,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
unsigned int val, ref_bit_msk;
u8 shift, address = 0;
- ret = strtobool(buf, &readin);
+ ret = kstrtobool(buf, &readin);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 7a62e6e1d5f1..1a63b8456725 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -502,7 +502,7 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 2b14914b4050..339564fe47d1 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -188,7 +188,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5791_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 91eaaf793b3e..03edf046dec6 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -77,7 +77,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index aed46c80757e..3a3c4f4874e4 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -10,6 +10,7 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
@@ -149,7 +150,7 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ltc2632_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
@@ -362,8 +363,7 @@ static int ltc2632_probe(struct spi_device *spi)
}
}
- indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name
- : spi_get_device_id(spi)->name;
+ indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name;
indio_dev->info = &ltc2632_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = chip_info->channels;
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, ltc2632_of_match);
static struct spi_driver ltc2632_driver = {
.driver = {
.name = "ltc2632",
- .of_match_table = of_match_ptr(ltc2632_of_match),
+ .of_match_table = ltc2632_of_match,
},
.probe = ltc2632_probe,
.remove = ltc2632_remove,
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 2f9c384885f4..937b0d25a11c 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -703,21 +703,20 @@ static int ltc2688_tgp_clk_setup(struct ltc2688_state *st,
struct ltc2688_chan *chan,
struct fwnode_handle *node, int tgp)
{
+ struct device *dev = &st->spi->dev;
unsigned long rate;
struct clk *clk;
int ret, f;
- clk = devm_get_clk_from_child(&st->spi->dev, to_of_node(node), NULL);
+ clk = devm_get_clk_from_child(dev, to_of_node(node), NULL);
if (IS_ERR(clk))
- return dev_err_probe(&st->spi->dev, PTR_ERR(clk),
- "failed to get tgp clk.\n");
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get tgp clk.\n");
ret = clk_prepare_enable(clk);
if (ret)
- return dev_err_probe(&st->spi->dev, ret,
- "failed to enable tgp clk.\n");
+ return dev_err_probe(dev, ret, "failed to enable tgp clk.\n");
- ret = devm_add_action_or_reset(&st->spi->dev, ltc2688_clk_disable, clk);
+ ret = devm_add_action_or_reset(dev, ltc2688_clk_disable, clk);
if (ret)
return ret;
@@ -858,6 +857,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st)
static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
{
+ struct device *dev = &st->spi->dev;
struct gpio_desc *gpio;
int ret;
@@ -865,10 +865,9 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
 * If we have a reset pin, use that to reset the board. If not, use
* the reset bit.
*/
- gpio = devm_gpiod_get_optional(&st->spi->dev, "clr", GPIOD_OUT_HIGH);
+ gpio = devm_gpiod_get_optional(dev, "clr", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
- return dev_err_probe(&st->spi->dev, PTR_ERR(gpio),
- "Failed to get reset gpio");
+ return dev_err_probe(dev, PTR_ERR(gpio), "Failed to get reset gpio");
if (gpio) {
usleep_range(1000, 1200);
/* bring device out of reset */
@@ -887,7 +886,7 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
* Duplicate the default channel configuration as it can change during
* @ltc2688_channel_config()
*/
- st->iio_chan = devm_kmemdup(&st->spi->dev, ltc2688_channels,
+ st->iio_chan = devm_kmemdup(dev, ltc2688_channels,
sizeof(ltc2688_channels), GFP_KERNEL);
if (!st->iio_chan)
return -ENOMEM;
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index fce640b7f1c8..540f9ea7cada 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -116,7 +116,7 @@ static ssize_t max5821_write_dac_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 842bad57cb88..7fcb86288823 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -80,7 +80,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
@@ -178,7 +178,7 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret)
return ret;
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index b20192a071cb..daa42bcbae83 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -220,7 +220,7 @@ static ssize_t stm32_dac_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 4e1156e6deb2..106ce3546419 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -133,7 +133,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 0b775f943db3..4b6b04038e94 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -179,7 +179,7 @@ static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index e10d17e60ed3..4afc411725d9 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -123,7 +123,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
u8 power;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c0b7ef900735..c24f609c2ade 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
- if (!swd) {
- ret = -ENOMEM;
- goto error_kzalloc;
- }
+ if (!swd)
+ return ERR_PTR(-ENOMEM);
+
/*
* Allocate an IIO device.
*
@@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
- goto error_ret;
+ goto error_free_swd;
}
st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
+ if (!indio_dev->name) {
+ ret = -ENOMEM;
+ goto error_free_device;
+ }
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
- goto error_free_device;
+ goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
@@ -649,11 +652,12 @@ error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
+error_free_name:
+ kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
-error_ret:
+error_free_swd:
kfree(swd);
-error_kzalloc:
return ERR_PTR(ret);
}
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index d81c2b2dad82..9b2f99449a82 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -45,41 +45,31 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
+ int i = 0, j;
u16 *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
- /*
- * Three common options here:
- * hardware scans: certain combinations of channels make
- * up a fast read. The capture will consist of all of them.
- * Hence we just call the grab data function and fill the
- * buffer without processing.
- * software scans: can be considered to be random access
- * so efficient reading is just a case of minimal bus
- * transactions.
- * software culled hardware scans:
- * occasionally a driver may process the nearest hardware
- * scan to avoid storing elements that are not desired. This
- * is the fiddliest option by far.
- * Here let's pretend we have random access. And the values are
- * in the constant table fakedata.
- */
- int i, j;
-
- for (i = 0, j = 0;
- i < bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
- i++, j++) {
- j = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength, j);
- /* random access read from the 'device' */
- data[i] = fakedata[j];
- }
- }
+ /*
+ * Three common options here:
+ * hardware scans:
+ * certain combinations of channels make up a fast read. The capture
+ * will consist of all of them. Hence we just call the grab data
+ * function and fill the buffer without processing.
+ * software scans:
+ * can be considered to be random access so efficient reading is just
+ * a case of minimal bus transactions.
+ * software culled hardware scans:
+ * occasionally a driver may process the nearest hardware scan to avoid
+ * storing elements that are not desired. This is the fiddliest option
+ * by far.
+ * Here let's pretend we have random access. And the values are in the
+ * constant table fakedata.
+ */
+ for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+ data[i++] = fakedata[j];
iio_push_to_buffers_with_timestamp(indio_dev, data,
iio_get_time_ns(indio_dev));
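Editor's note: the dummy-buffer hunk above replaces the hand-rolled bitmap_weight()/find_next_bit() loop with for_each_set_bit(), which also makes the separate bitmap_empty() guard unnecessary, since the loop body simply never runs for an empty mask. A small illustration of the iterator (hypothetical helper):

#include <linux/bitops.h>
#include <linux/types.h>

static void foo_pack_scan(const unsigned long *mask, unsigned int nbits,
                          const u16 *src, u16 *dst)
{
        unsigned int i = 0, j;

        for_each_set_bit(j, mask, nbits)
                dst[i++] = src[j];      /* compact only the enabled channels */
}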
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index a0f92c336fc4..942870539268 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -516,7 +516,7 @@ static ssize_t ad9523_store(struct device *dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 410e5e9f2672..0923fd793492 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -7,9 +7,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -822,7 +822,6 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct device_node *np = indio_dev->dev.of_node;
unsigned long irq_trig;
bool irq_open_drain;
int irq1;
@@ -831,8 +830,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
if (!data->irq)
return 0;
- irq1 = of_irq_get_byname(np, "INT1");
-
+ irq1 = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
if (irq1 == data->irq) {
dev_info(dev, "using interrupt line INT1\n");
ret = regmap_field_write(data->regmap_fields[F_INT_CFG_DRDY],
@@ -843,7 +841,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
dev_info(dev, "using interrupt line INT2\n");
- irq_open_drain = of_property_read_bool(np, "drive-open-drain");
+ irq_open_drain = device_property_read_bool(dev, "drive-open-drain");
data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
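Editor's note: the fxas21002c conversion above (and the bmi160/adis16480 ones later in this series) swaps OF-only helpers for their fwnode equivalents, so the same code path serves DT and ACPI. A hedged sketch of the lookup pattern; the "INT1"/"INT2" interrupt names and the "drive-open-drain" property follow the bindings used in these drivers:

#include <linux/device.h>
#include <linux/property.h>

static int foo_pick_irq(struct device *dev, bool *open_drain)
{
        int irq;

        irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
        if (irq <= 0)
                irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT2");

        *open_drain = device_property_read_bool(dev, "drive-open-drain");

        return irq;     /* <= 0 means no named interrupt was found */
}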
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index ea387efab62d..4f19dc7ffe57 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/random.h>
#include <linux/slab.h>
@@ -1050,6 +1051,7 @@ static const struct iio_trigger_ops mpu3050_trigger_ops = {
static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ struct device *dev = mpu3050->dev;
unsigned long irq_trig;
int ret;
@@ -1061,8 +1063,7 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
return -ENOMEM;
/* Check if IRQ is open drain */
- if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
- mpu3050->irq_opendrain = true;
+ mpu3050->irq_opendrain = device_property_read_bool(dev, "drive-open-drain");
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
/*
@@ -1118,13 +1119,12 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
mpu3050->trig->name,
mpu3050->trig);
if (ret) {
- dev_err(mpu3050->dev,
- "can't get IRQ %d, error %d\n", irq, ret);
+ dev_err(dev, "can't get IRQ %d, error %d\n", irq, ret);
return ret;
}
mpu3050->irq = irq;
- mpu3050->trig->dev.parent = mpu3050->dev;
+ mpu3050->trig->dev.parent = dev;
mpu3050->trig->ops = &mpu3050_trigger_ops;
iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
@@ -1263,7 +1263,7 @@ err_power_down:
}
EXPORT_SYMBOL(mpu3050_common_probe);
-int mpu3050_common_remove(struct device *dev)
+void mpu3050_common_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
@@ -1276,8 +1276,6 @@ int mpu3050_common_remove(struct device *dev)
free_irq(mpu3050->irq, mpu3050);
iio_device_unregister(indio_dev);
mpu3050_power_down(mpu3050);
-
- return 0;
}
EXPORT_SYMBOL(mpu3050_common_remove);
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index ef5bcbc4b45b..5b5f58baaf7f 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -86,7 +86,9 @@ static int mpu3050_i2c_remove(struct i2c_client *client)
if (mpu3050->i2cmux)
i2c_mux_del_adapters(mpu3050->i2cmux);
- return mpu3050_common_remove(&client->dev);
+ mpu3050_common_remove(&client->dev);
+
+ return 0;
}
/*
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
index 835b0249c376..faf4168a3b07 100644
--- a/drivers/iio/gyro/mpu3050.h
+++ b/drivers/iio/gyro/mpu3050.h
@@ -91,7 +91,7 @@ int mpu3050_common_probe(struct device *dev,
struct regmap *map,
int irq,
const char *name);
-int mpu3050_common_remove(struct device *dev);
+void mpu3050_common_remove(struct device *dev);
/* PM ops */
extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c
index 5fd1bf9902ea..d332474bc484 100644
--- a/drivers/iio/gyro/ssp_gyro_sensor.c
+++ b/drivers/iio/gyro/ssp_gyro_sensor.c
@@ -113,7 +113,6 @@ static int ssp_gyro_probe(struct platform_device *pdev)
indio_dev->available_scan_masks = ssp_gyro_scan_mask;
ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ssp_gyro_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 62172e18d0d8..eaa35da42b33 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -406,24 +406,17 @@ read_error:
static int st_gyro_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
- err = -EINVAL;
+ return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 36ba7611d9ce..ad5717965223 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -433,7 +433,6 @@ static int max30100_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&max30100_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 2292876c55e2..abbcef563807 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -542,7 +542,6 @@ static int max30102_probe(struct i2c_client *client,
}
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&max30102_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 44bbe3d19907..fe520194a837 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -7,14 +7,16 @@
#include <linux/clk.h>
#include <linux/bitfield.h>
-#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/math.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/lcm.h>
+#include <linux/property.h>
#include <linux/swab.h>
#include <linux/crc32.h>
@@ -1119,6 +1121,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16480 *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
+ struct device *dev = &adis->spi->dev;
int ret, bit, offset, i = 0;
__be16 *buffer;
u32 crc;
@@ -1130,7 +1133,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
adis->tx[1] = 0;
ret = spi_write(adis->spi, adis->tx, 2);
if (ret) {
- dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+ dev_err(dev, "Failed to change device page: %d\n", ret);
adis_dev_unlock(adis);
goto irq_done;
}
@@ -1140,7 +1143,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
ret = spi_sync(adis->spi, &adis->msg);
if (ret) {
- dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+ dev_err(dev, "Failed to read data: %d\n", ret);
adis_dev_unlock(adis);
goto irq_done;
}
@@ -1168,14 +1171,14 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
}
if (offset == 4) {
- dev_err(&adis->spi->dev, "Invalid burst data\n");
+ dev_err(dev, "Invalid burst data\n");
goto irq_done;
}
crc = be16_to_cpu(buffer[offset + 16]) << 16 | be16_to_cpu(buffer[offset + 15]);
valid = adis16480_validate_crc((u16 *)&buffer[offset], 15, crc);
if (!valid) {
- dev_err(&adis->spi->dev, "Invalid crc\n");
+ dev_err(dev, "Invalid crc\n");
goto irq_done;
}
@@ -1214,12 +1217,12 @@ static const struct iio_info adis16480_info = {
static int adis16480_stop_device(struct iio_dev *indio_dev)
{
struct adis16480 *st = iio_priv(indio_dev);
+ struct device *dev = &st->adis.spi->dev;
int ret;
ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, BIT(9));
if (ret)
- dev_err(&indio_dev->dev,
- "Could not power down device: %d\n", ret);
+ dev_err(dev, "Could not power down device: %d\n", ret);
return ret;
}
@@ -1239,9 +1242,10 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
-static int adis16480_config_irq_pin(struct device_node *of_node,
- struct adis16480 *st)
+static int adis16480_config_irq_pin(struct adis16480 *st)
{
+ struct device *dev = &st->adis.spi->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct irq_data *desc;
enum adis16480_int_pin pin;
unsigned int irq_type;
@@ -1250,7 +1254,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
desc = irq_get_irq_data(st->adis.spi->irq);
if (!desc) {
- dev_err(&st->adis.spi->dev, "Could not find IRQ %d\n", irq);
+ dev_err(dev, "Could not find IRQ %d\n", irq);
return -EINVAL;
}
@@ -1267,7 +1271,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
*/
pin = ADIS16480_PIN_DIO1;
for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
- irq = of_irq_get_byname(of_node, adis16480_int_pin_names[i]);
+ irq = fwnode_irq_get_byname(fwnode, adis16480_int_pin_names[i]);
if (irq > 0) {
pin = i;
break;
@@ -1287,23 +1291,22 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
} else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
val |= ADIS16480_DRDY_POL(0);
} else {
- dev_err(&st->adis.spi->dev,
- "Invalid interrupt type 0x%x specified\n", irq_type);
+ dev_err(dev, "Invalid interrupt type 0x%x specified\n", irq_type);
return -EINVAL;
}
/* Write the data ready configuration to the FNCTIO_CTRL register */
return adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
-static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
- struct device_node *of_node)
+static int adis16480_fw_get_ext_clk_pin(struct adis16480 *st)
{
+ struct device *dev = &st->adis.spi->dev;
const char *ext_clk_pin;
enum adis16480_int_pin pin;
int i;
pin = ADIS16480_PIN_DIO2;
- if (of_property_read_string(of_node, "adi,ext-clk-pin", &ext_clk_pin))
+ if (device_property_read_string(dev, "adi,ext-clk-pin", &ext_clk_pin))
goto clk_input_not_found;
for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
@@ -1312,15 +1315,13 @@ static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
}
clk_input_not_found:
- dev_info(&st->adis.spi->dev,
- "clk input line not specified, using DIO2\n");
+ dev_info(dev, "clk input line not specified, using DIO2\n");
return pin;
}
-static int adis16480_ext_clk_config(struct adis16480 *st,
- struct device_node *of_node,
- bool enable)
+static int adis16480_ext_clk_config(struct adis16480 *st, bool enable)
{
+ struct device *dev = &st->adis.spi->dev;
unsigned int mode, mask;
enum adis16480_int_pin pin;
uint16_t val;
@@ -1330,16 +1331,14 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
if (ret)
return ret;
- pin = adis16480_of_get_ext_clk_pin(st, of_node);
+ pin = adis16480_fw_get_ext_clk_pin(st);
/*
* Each DIOx pin supports only one function at a time. When a single pin
* has two assignments, the enable bit for a lower priority function
* automatically resets to zero (disabling the lower priority function).
*/
if (pin == ADIS16480_DRDY_SEL(val))
- dev_warn(&st->adis.spi->dev,
- "DIO%x pin supports only one function at a time\n",
- pin + 1);
+ dev_warn(dev, "DIO%x pin supports only one function at a time\n", pin + 1);
mode = ADIS16480_SYNC_EN(enable) | ADIS16480_SYNC_SEL(pin);
mask = ADIS16480_SYNC_EN_MSK | ADIS16480_SYNC_SEL_MSK;
@@ -1361,31 +1360,27 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
static int adis16480_get_ext_clocks(struct adis16480 *st)
{
- st->clk_mode = ADIS16480_CLK_INT;
- st->ext_clk = devm_clk_get(&st->adis.spi->dev, "sync");
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ struct device *dev = &st->adis.spi->dev;
+
+ st->ext_clk = devm_clk_get_optional(dev, "sync");
+ if (IS_ERR(st->ext_clk))
+ return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+ if (st->ext_clk) {
st->clk_mode = ADIS16480_CLK_SYNC;
return 0;
}
- if (PTR_ERR(st->ext_clk) != -ENOENT) {
- dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
- return PTR_ERR(st->ext_clk);
- }
-
if (st->chip_info->has_pps_clk_mode) {
- st->ext_clk = devm_clk_get(&st->adis.spi->dev, "pps");
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ st->ext_clk = devm_clk_get_optional(dev, "pps");
+ if (IS_ERR(st->ext_clk))
+ return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+ if (st->ext_clk) {
st->clk_mode = ADIS16480_CLK_PPS;
return 0;
}
-
- if (PTR_ERR(st->ext_clk) != -ENOENT) {
- dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
- return PTR_ERR(st->ext_clk);
- }
}
+ st->clk_mode = ADIS16480_CLK_INT;
return 0;
}
@@ -1404,11 +1399,12 @@ static int adis16480_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
const struct adis_data *adis16480_data;
irq_handler_t trigger_handler = NULL;
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -1432,13 +1428,12 @@ static int adis16480_probe(struct spi_device *spi)
return ret;
if (st->chip_info->has_sleep_cnt) {
- ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
- indio_dev);
+ ret = devm_add_action_or_reset(dev, adis16480_stop, indio_dev);
if (ret)
return ret;
}
- ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+ ret = adis16480_config_irq_pin(st);
if (ret)
return ret;
@@ -1446,12 +1441,12 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
- ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
+ if (st->ext_clk) {
+ ret = adis16480_ext_clk_config(st, true);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&spi->dev, adis16480_clk_disable, st->ext_clk);
+ ret = devm_add_action_or_reset(dev, adis16480_clk_disable, st->ext_clk);
if (ret)
return ret;
@@ -1484,7 +1479,7 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
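Editor's note: adis16480_get_ext_clocks() above switches to devm_clk_get_optional(), which returns NULL when the firmware describes no such clock and an ERR_PTR only for genuine failures, so the -ENOENT special-casing disappears. A minimal sketch of that shape with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_get_sync_clk(struct device *dev, struct clk **out)
{
        struct clk *clk;

        clk = devm_clk_get_optional(dev, "sync");
        if (IS_ERR(clk))
                return dev_err_probe(dev, PTR_ERR(clk), "failed to get sync clk\n");

        *out = clk;     /* NULL simply means: fall back to the internal clock */
        return 0;
}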
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 01336105792e..e7aec56ea136 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -11,10 +11,9 @@
*/
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -525,17 +524,6 @@ static const struct iio_info bmi160_info = {
.attrs = &bmi160_attrs_group,
};
-static const char *bmi160_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
-
- return dev_name(dev);
-}
-
static int bmi160_write_conf_reg(struct regmap *regmap, unsigned int reg,
unsigned int mask, unsigned int bits,
unsigned int write_usleep)
@@ -647,18 +635,18 @@ int bmi160_enable_irq(struct regmap *regmap, bool enable)
}
EXPORT_SYMBOL(bmi160_enable_irq);
-static int bmi160_get_irq(struct device_node *of_node, enum bmi160_int_pin *pin)
+static int bmi160_get_irq(struct fwnode_handle *fwnode, enum bmi160_int_pin *pin)
{
int irq;
/* Use INT1 if possible, otherwise fall back to INT2. */
- irq = of_irq_get_byname(of_node, "INT1");
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
if (irq > 0) {
*pin = BMI160_PIN_INT1;
return irq;
}
- irq = of_irq_get_byname(of_node, "INT2");
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
if (irq > 0)
*pin = BMI160_PIN_INT2;
@@ -688,7 +676,7 @@ static int bmi160_config_device_irq(struct iio_dev *indio_dev, int irq_type,
return -EINVAL;
}
- open_drain = of_property_read_bool(dev->of_node, "drive-open-drain");
+ open_drain = device_property_read_bool(dev, "drive-open-drain");
return bmi160_config_pin(data->regmap, pin, open_drain, irq_mask,
BMI160_NORMAL_WRITE_USLEEP);
@@ -872,9 +860,6 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
- if (!name && ACPI_HANDLE(dev))
- name = bmi160_match_acpi_device(dev);
-
indio_dev->channels = bmi160_channels;
indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
indio_dev->name = name;
@@ -887,7 +872,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
- irq = bmi160_get_irq(dev->of_node, &int_pin);
+ irq = bmi160_get_irq(dev_fwnode(dev), &int_pin);
if (irq > 0) {
ret = bmi160_setup_irq(indio_dev, irq, int_pin);
if (ret)
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 26398614eddf..02f149d37b17 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -8,10 +8,9 @@
* - 0x68 if SDO is pulled to GND
* - 0x69 if SDO is pulled to VDDIO
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "bmi160.h"
@@ -20,7 +19,7 @@ static int bmi160_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regmap *regmap;
- const char *name = NULL;
+ const char *name;
regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
@@ -31,6 +30,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
if (id)
name = id->name;
+ else
+ name = dev_name(&client->dev);
return bmi160_core_probe(&client->dev, regmap, name, false);
}
@@ -47,19 +48,17 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
-#ifdef CONFIG_OF
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi160" },
{ },
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
static struct i2c_driver bmi160_i2c_driver = {
.driver = {
.name = "bmi160_i2c",
- .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
- .of_match_table = of_match_ptr(bmi160_of_match),
+ .acpi_match_table = bmi160_acpi_match,
+ .of_match_table = bmi160_of_match,
},
.probe = bmi160_i2c_probe,
.id_table = bmi160_i2c_id,
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index 61389b41c6d9..24f7d75c7903 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -5,9 +5,8 @@
* Copyright (c) 2016, Intel Corporation.
*
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -17,6 +16,7 @@ static int bmi160_spi_probe(struct spi_device *spi)
{
struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
+ const char *name;
regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
@@ -24,7 +24,13 @@ static int bmi160_spi_probe(struct spi_device *spi)
regmap);
return PTR_ERR(regmap);
}
- return bmi160_core_probe(&spi->dev, regmap, id->name, true);
+
+ if (id)
+ name = id->name;
+ else
+ name = dev_name(&spi->dev);
+
+ return bmi160_core_probe(&spi->dev, regmap, name, true);
}
static const struct spi_device_id bmi160_spi_id[] = {
@@ -39,20 +45,18 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
-#ifdef CONFIG_OF
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi160" },
{ },
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
static struct spi_driver bmi160_spi_driver = {
.probe = bmi160_spi_probe,
.id_table = bmi160_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
- .of_match_table = of_match_ptr(bmi160_of_match),
+ .acpi_match_table = bmi160_acpi_match,
+ .of_match_table = bmi160_of_match,
.name = "bmi160_spi",
},
};
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 383cc3250342..c3f433ad3af6 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -731,7 +731,6 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
indio_dev->available_scan_masks = inv_icm42600_accel_scan_masks;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&inv_icm42600_buffer_ops);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index cec1dd0e0464..9d94a8518e3c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -743,7 +743,6 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
indio_dev->setup_ops = &inv_icm42600_buffer_ops;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&inv_icm42600_buffer_ops);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 9c625517173a..3636b1bc90f1 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -16,7 +16,7 @@ config INV_MPU6050_I2C
select REGMAP_I2C
help
This driver supports the Invensense MPU6050/9150,
- MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
and IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
select REGMAP_SPI
help
This driver supports the Invensense MPU6000,
- MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
and IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 597768c29a72..86fbbe904050 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -218,6 +218,15 @@ static const struct inv_mpu6050_hw hw_info[] = {
.startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
},
{
+ .whoami = INV_ICM20608D_WHOAMI_VALUE,
+ .name = "ICM20608D",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
+ },
+ {
.whoami = INV_ICM20609_WHOAMI_VALUE,
.name = "ICM20609",
.reg = &reg_set_6500,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 55cffb5fa115..2aa647704a79 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -29,6 +29,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
switch (st->chip_type) {
case INV_ICM20608:
+ case INV_ICM20608D:
case INV_ICM20609:
case INV_ICM20689:
case INV_ICM20602:
@@ -182,6 +183,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
@@ -226,6 +228,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20608
},
{
+ .compatible = "invensense,icm20608d",
+ .data = (void *)INV_ICM20608D
+ },
+ {
.compatible = "invensense,icm20609",
.data = (void *)INV_ICM20609
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index c6aa36ee966a..8e14f20b1314 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -76,6 +76,7 @@ enum inv_devices {
INV_MPU9250,
INV_MPU9255,
INV_ICM20608,
+ INV_ICM20608D,
INV_ICM20609,
INV_ICM20689,
INV_ICM20602,
@@ -394,6 +395,7 @@ struct inv_mpu6050_state {
#define INV_MPU9255_WHOAMI_VALUE 0x73
#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
+#define INV_ICM20608D_WHOAMI_VALUE 0xAE
#define INV_ICM20609_WHOAMI_VALUE 0xA6
#define INV_ICM20689_WHOAMI_VALUE 0x98
#define INV_ICM20602_WHOAMI_VALUE 0x12
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 26a7c2521dc4..e6107b0cc38f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -73,6 +73,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
@@ -113,6 +114,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20608
},
{
+ .compatible = "invensense,icm20608d",
+ .data = (void *)INV_ICM20608D
+ },
+ {
.compatible = "invensense,icm20609",
.data = (void *)INV_ICM20609
},
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 85860217aaf3..fefd0b939100 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -11,9 +11,9 @@ config IIO_ST_LSM6DSX
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
- ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, the accelerometer/gyroscope
- of lsm9ds1 and lsm6dst.
+ ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
+ lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop,
+ the accelerometer/gyroscope of lsm9ds1 and lsm6dst.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 6ac4eac36458..a86dd29a4738 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -31,6 +31,7 @@
#define ST_LSM6DSRX_DEV_NAME "lsm6dsrx"
#define ST_LSM6DST_DEV_NAME "lsm6dst"
#define ST_LSM6DSOP_DEV_NAME "lsm6dsop"
+#define ST_ASM330LHHX_DEV_NAME "asm330lhhx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -49,6 +50,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSRX_ID,
ST_LSM6DST_ID,
ST_LSM6DSOP_ID,
+ ST_ASM330LHHX_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 16730a780964..c7d3730ab1c5 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -14,7 +14,8 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
+ * LSM6DST/LSM6DSOP:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
@@ -746,7 +747,6 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
continue;
ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i],
- INDIO_BUFFER_SOFTWARE,
&st_lsm6dsx_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b1d8d5a66f01..910397716833 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -26,7 +26,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
@@ -786,6 +786,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.hw_id = ST_LSM6DST_ID,
.name = ST_LSM6DST_DEV_NAME,
.wai = 0x6d,
+ }, {
+ .hw_id = ST_ASM330LHHX_ID,
+ .name = ST_ASM330LHHX_DEV_NAME,
+ .wai = 0x6b,
},
},
.channels = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 8b4fc2c15622..715fbdc8190e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm6dsop",
.data = (void *)ST_LSM6DSOP_ID,
},
+ {
+ .compatible = "st,asm330lhhx",
+ .data = (void *)ST_ASM330LHHX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -122,6 +126,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+ { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index e80110b6b280..f5767cf76c1d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm6dsop",
.data = (void *)ST_LSM6DSOP_ID,
},
+ {
+ .compatible = "st,asm330lhhx",
+ .data = (void *)ST_ASM330LHHX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -122,6 +126,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+ { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b078eb2f3c9d..06141ca27e1f 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -510,7 +510,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_buffer *buffer = this_attr->buffer;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
@@ -557,7 +557,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool state;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
@@ -915,7 +915,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (scan_mask == NULL)
return -EINVAL;
} else {
- scan_mask = compound_mask;
+ scan_mask = compound_mask;
}
config->scan_bytes = iio_compute_scan_bytes(indio_dev,
@@ -1059,13 +1059,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct iio_buffer *buffer;
+ struct iio_buffer *buffer, *tmp = NULL;
int ret;
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
- indio_dev->currentmode = config->mode;
+ iio_dev_opaque->currentmode = config->mode;
iio_update_demux(indio_dev);
@@ -1097,11 +1097,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_buffer_enable(buffer, indio_dev);
- if (ret)
+ if (ret) {
+ tmp = buffer;
goto err_disable_buffers;
+ }
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
if (ret)
@@ -1120,11 +1122,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
return 0;
err_detach_pollfunc:
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
err_disable_buffers:
+ buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
buffer_list)
iio_buffer_disable(buffer, indio_dev);
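
The error path above now unwinds only the buffers that were actually enabled. A hedged sketch of the same partial-unwind idiom built on list_prepare_entry() and list_for_each_entry_continue_reverse(), with hypothetical item types and stubbed enable/disable helpers:

	#include <linux/list.h>
	#include <linux/types.h>

	struct ex_item {
		struct list_head node;
	};

	static int ex_enable(struct ex_item *item) { return 0; }	/* stub */
	static void ex_disable(struct ex_item *item) { }		/* stub */

	/* Enable every item; on failure, disable only the items that were
	 * already enabled before the failing one. */
	static int ex_enable_all(struct list_head *head)
	{
		struct ex_item *item, *failed = NULL;
		int ret = 0;

		list_for_each_entry(item, head, node) {
			ret = ex_enable(item);
			if (ret) {
				failed = item;
				goto unwind;
			}
		}
		return 0;

	unwind:
		/* Re-seat the cursor on the failing entry, then walk backwards
		 * over the entries that were enabled so far (the failing one
		 * itself is skipped). */
		item = list_prepare_entry(failed, head, node);
		list_for_each_entry_continue_reverse(item, head, node)
			ex_disable(item);
		return ret;
	}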
@@ -1132,7 +1135,7 @@ err_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
- indio_dev->currentmode = INDIO_DIRECT_MODE;
+ iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
@@ -1162,7 +1165,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
ret = ret2;
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
@@ -1181,7 +1184,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
- indio_dev->currentmode = INDIO_DIRECT_MODE;
+ iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
return ret;
}
@@ -1300,7 +1303,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool inlist;
- ret = strtobool(buf, &requested_state);
+ ret = kstrtobool(buf, &requested_state);
if (ret < 0)
return ret;
@@ -1629,6 +1632,19 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
if (channels[i].scan_index < 0)
continue;
+ /* Verify that sample bits fit into storage */
+ if (channels[i].scan_type.storagebits <
+ channels[i].scan_type.realbits +
+ channels[i].scan_type.shift) {
+ dev_err(&indio_dev->dev,
+ "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
+ i, channels[i].scan_type.storagebits,
+ channels[i].scan_type.realbits,
+ channels[i].scan_type.shift);
+ ret = -EINVAL;
+ goto error_cleanup_dynamic;
+ }
+
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
@@ -1649,7 +1665,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
}
attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
- attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL);
+ attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto error_free_scan_mask;
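
The new check above enforces storagebits >= realbits + shift for every scanned channel. A hedged illustration with made-up channel definitions, one that passes and one that would now be rejected at buffer setup:

	#include <linux/iio/iio.h>

	/* Passes the check: 12 valid bits shifted left by 4 still fit in 16. */
	static const struct iio_chan_spec ex_ok_chan = {
		.type = IIO_VOLTAGE,
		.scan_index = 0,
		.scan_type = {
			.sign = 'u',
			.realbits = 12,
			.storagebits = 16,
			.shift = 4,
		},
	};

	/* Would now fail buffer setup: 14 + 4 > 16. */
	static const struct iio_chan_spec ex_bad_chan = {
		.type = IIO_VOLTAGE,
		.scan_index = 1,
		.scan_type = {
			.sign = 'u',
			.realbits = 14,
			.storagebits = 16,
			.shift = 4,
		},
	};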
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e1ed44dec2ab..adf054c7a75e 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -185,6 +185,20 @@ int iio_device_id(struct iio_dev *indio_dev)
EXPORT_SYMBOL_GPL(iio_device_id);
/**
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev: IIO device structure for device
+ */
+bool iio_buffer_enabled(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->currentmode
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+ INDIO_BUFFER_SOFTWARE);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_enabled);
+
+/**
* iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
* @array: array of strings
* @n: number of strings in the array
@@ -892,8 +906,7 @@ static int __iio_str_to_fixpoint(const char *str, int fract_mult,
} else if (*str == '\n') {
if (*(str + 1) == '\0')
break;
- else
- return -EINVAL;
+ return -EINVAL;
} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
/* Ignore the dB suffix */
str += sizeof(" dB") - 1;
@@ -1894,20 +1907,22 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- const char *label;
+ struct fwnode_handle *fwnode;
int ret;
if (!indio_dev->info)
return -EINVAL;
iio_dev_opaque->driver_module = this_mod;
- /* If the calling driver did not initialize of_node, do it here */
- if (!indio_dev->dev.of_node && indio_dev->dev.parent)
- indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
- label = of_get_property(indio_dev->dev.of_node, "label", NULL);
- if (label)
- indio_dev->label = label;
+ /* If the calling driver did not initialize firmware node, do it here */
+ if (dev_fwnode(&indio_dev->dev))
+ fwnode = dev_fwnode(&indio_dev->dev);
+ else
+ fwnode = dev_fwnode(indio_dev->dev.parent);
+ device_set_node(&indio_dev->dev, fwnode);
+
+ fwnode_property_read_string(fwnode, "label", &indio_dev->label);
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
@@ -2059,6 +2074,19 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+/**
+ * iio_device_get_current_mode() - helper function providing read-only access to
+ * the opaque @currentmode variable
+ * @indio_dev: IIO device structure for device
+ */
+int iio_device_get_current_mode(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->currentmode;
+}
+EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
+
subsys_initcall(iio_init);
module_exit(iio_exit);
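
With currentmode moved into the opaque structure, drivers are expected to go through the exported helpers instead of touching indio_dev->currentmode directly. A hedged sketch of a driver-side check, using a hypothetical handler:

	#include <linux/iio/iio.h>

	/* Hypothetical sample handler: take the buffered path while any
	 * buffer mode is active, otherwise treat the sample as a one-shot read. */
	static void ex_handle_sample(struct iio_dev *indio_dev)
	{
		if (iio_buffer_enabled(indio_dev)) {
			/* push to the buffer, e.g. via iio_push_to_buffers() */
		} else if (iio_device_get_current_mode(indio_dev) == INDIO_DIRECT_MODE) {
			/* complete a polled/one-shot read */
		}
	}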
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index ce8b102ce52f..b5e059e15b0a 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -274,7 +274,7 @@ static ssize_t iio_ev_state_store(struct device *dev,
int ret;
bool val;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index f504ed351b3e..585b6cef8fcc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -444,7 +444,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
int ret;
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index a62c7b4b8678..8537e88f02e3 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -155,7 +155,6 @@ config CM3323
config CM3605
tristate "Capella CM3605 ambient light and proximity sensor"
- depends on OF
help
Say Y here if you want to build a driver for Capella CM3605
ambient light and short range proximity sensor.
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 4141c0fa7bc4..09b831f9f40b 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1003,7 +1003,6 @@ static int apds9960_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&apds9960_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 1d02dfbc29d1..b578b46276cc 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -106,6 +106,7 @@ struct stk3310_data {
struct mutex lock;
bool als_enabled;
bool ps_enabled;
+ uint32_t ps_near_level;
u64 timestamp;
struct regmap *regmap;
struct regmap_field *reg_state;
@@ -135,6 +136,25 @@ static const struct iio_event_spec stk3310_events[] = {
},
};
+static ssize_t stk3310_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stk3310_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->ps_near_level);
+}
+
+static const struct iio_chan_spec_ext_info stk3310_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = stk3310_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
static const struct iio_chan_spec stk3310_channels[] = {
{
.type = IIO_LIGHT,
@@ -151,6 +171,7 @@ static const struct iio_chan_spec stk3310_channels[] = {
BIT(IIO_CHAN_INFO_INT_TIME),
.event_spec = stk3310_events,
.num_event_specs = ARRAY_SIZE(stk3310_events),
+ .ext_info = stk3310_ext_info,
}
};
@@ -581,6 +602,10 @@ static int stk3310_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->client = client;
i2c_set_clientdata(client, indio_dev);
+
+ device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->ps_near_level);
+
mutex_init(&data->lock);
ret = stk3310_regmap_init(data);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 729f14d9f2a4..dd9051f1cc1a 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -15,7 +15,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
+
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -549,10 +551,10 @@ prox_poll_err:
static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
{
- struct device_node *of_node = chip->client->dev.of_node;
+ struct device *dev = &chip->client->dev;
int ret, tmp, i;
- ret = of_property_read_u32(of_node, "led-max-microamp", &tmp);
+ ret = device_property_read_u32(dev, "led-max-microamp", &tmp);
if (ret < 0)
return ret;
@@ -563,20 +565,18 @@ static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
}
}
- dev_err(&chip->client->dev, "Invalid value %d for led-max-microamp\n",
- tmp);
+ dev_err(dev, "Invalid value %d for led-max-microamp\n", tmp);
return -EINVAL;
-
}
static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
{
- struct device_node *of_node = chip->client->dev.of_node;
+ struct device *dev = &chip->client->dev;
int i, ret, num_leds, prox_diode_mask;
u32 leds[TSL2772_MAX_PROX_LEDS];
- ret = of_property_count_u32_elems(of_node, "amstaos,proximity-diodes");
+ ret = device_property_count_u32(dev, "amstaos,proximity-diodes");
if (ret < 0)
return ret;
@@ -584,12 +584,9 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
if (num_leds > TSL2772_MAX_PROX_LEDS)
num_leds = TSL2772_MAX_PROX_LEDS;
- ret = of_property_read_u32_array(of_node, "amstaos,proximity-diodes",
- leds, num_leds);
+ ret = device_property_read_u32_array(dev, "amstaos,proximity-diodes", leds, num_leds);
if (ret < 0) {
- dev_err(&chip->client->dev,
- "Invalid value for amstaos,proximity-diodes: %d.\n",
- ret);
+ dev_err(dev, "Invalid value for amstaos,proximity-diodes: %d.\n", ret);
return ret;
}
@@ -600,9 +597,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
else if (leds[i] == 1)
prox_diode_mask |= TSL2772_DIODE1;
else {
- dev_err(&chip->client->dev,
- "Invalid value %d in amstaos,proximity-diodes.\n",
- leds[i]);
+ dev_err(dev, "Invalid value %d in amstaos,proximity-diodes.\n", leds[i]);
return -EINVAL;
}
}
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 54445365c4bc..07eb619bcfe8 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -9,7 +9,6 @@ menu "Magnetometer sensors"
config AK8974
tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
depends on I2C
- depends on OF
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 26195733ea3e..707ba25360b8 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -141,18 +141,10 @@ static irqreturn_t rm3100_irq_handler(int irq, void *d)
struct iio_dev *indio_dev = d;
struct rm3100_data *data = iio_priv(indio_dev);
- switch (indio_dev->currentmode) {
- case INDIO_DIRECT_MODE:
+ if (!iio_buffer_enabled(indio_dev))
complete(&data->measuring_done);
- break;
- case INDIO_BUFFER_TRIGGERED:
+ else
iio_trigger_poll(data->drdy_trig);
- break;
- default:
- dev_err(indio_dev->dev.parent,
- "device mode out of control, current mode: %d",
- indio_dev->currentmode);
- }
return IRQ_WAKE_THREAD;
}
@@ -377,7 +369,7 @@ static int rm3100_set_samp_freq(struct iio_dev *indio_dev, int val, int val2)
goto unlock_return;
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
/* Writing TMRC registers requires CMM reset. */
ret = regmap_write(regmap, RM3100_REG_CMM, 0);
if (ret < 0)
@@ -553,7 +545,6 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
indio_dev->channels = rm3100_channels;
indio_dev->num_channels = ARRAY_SIZE(rm3100_channels);
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
- indio_dev->currentmode = INDIO_DIRECT_MODE;
if (!irq)
data->use_interrupt = false;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 74435f4a427d..e2fd233b3626 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -540,24 +540,17 @@ read_error:
static int st_magn_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
- err = -EINVAL;
+ return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/multiplexer/Kconfig b/drivers/iio/multiplexer/Kconfig
index a1e1332d1206..928f424a1ed3 100644
--- a/drivers/iio/multiplexer/Kconfig
+++ b/drivers/iio/multiplexer/Kconfig
@@ -9,7 +9,6 @@ menu "Multiplexers"
config IIO_MUX
tristate "IIO multiplexer driver"
select MULTIPLEXER
- depends on OF || COMPILE_TEST
help
Say yes here to build support for the IIO multiplexer.
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index f422d44377df..93558fddfa9b 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -10,11 +10,12 @@
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mux/consumer.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct mux_ext_info_cache {
char *data;
@@ -324,37 +325,21 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
return 0;
}
-/*
- * Same as of_property_for_each_string(), but also keeps track of the
- * index of each string.
- */
-#define of_property_for_each_string_index(np, propname, prop, s, i) \
- for (prop = of_find_property(np, propname, NULL), \
- s = of_prop_next_string(prop, NULL), \
- i = 0; \
- s; \
- s = of_prop_next_string(prop, s), \
- i++)
-
static int mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev;
struct iio_channel *parent;
struct mux *mux;
- struct property *prop;
- const char *label;
+ const char **labels;
+ int all_children;
+ int children;
u32 state;
int sizeof_ext_info;
- int children;
int sizeof_priv;
int i;
int ret;
- if (!np)
- return -ENODEV;
-
parent = devm_iio_channel_get(dev, "parent");
if (IS_ERR(parent))
return dev_err_probe(dev, PTR_ERR(parent),
@@ -366,9 +351,21 @@ static int mux_probe(struct platform_device *pdev)
sizeof_ext_info *= sizeof(*mux->ext_info);
}
+ all_children = device_property_string_array_count(dev, "channels");
+ if (all_children < 0)
+ return all_children;
+
+ labels = devm_kmalloc_array(dev, all_children, sizeof(*labels), GFP_KERNEL);
+ if (!labels)
+ return -ENOMEM;
+
+ ret = device_property_read_string_array(dev, "channels", labels, all_children);
+ if (ret < 0)
+ return ret;
+
children = 0;
- of_property_for_each_string(np, "channels", prop, label) {
- if (*label)
+ for (state = 0; state < all_children; state++) {
+ if (*labels[state])
children++;
}
if (children <= 0) {
@@ -395,7 +392,7 @@ static int mux_probe(struct platform_device *pdev)
mux->cached_state = -1;
mux->delay_us = 0;
- of_property_read_u32(np, "settle-time-us", &mux->delay_us);
+ device_property_read_u32(dev, "settle-time-us", &mux->delay_us);
indio_dev->name = dev_name(dev);
indio_dev->info = &mux_info;
@@ -426,11 +423,11 @@ static int mux_probe(struct platform_device *pdev)
}
i = 0;
- of_property_for_each_string_index(np, "channels", prop, label, state) {
- if (!*label)
+ for (state = 0; state < all_children; state++) {
+ if (!*labels[state])
continue;
- ret = mux_configure_channel(dev, mux, state, label, i++);
+ ret = mux_configure_channel(dev, mux, state, labels[state], i++);
if (ret < 0)
return ret;
}
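
The probe rewrite above swaps the OF-only string iterator for the generic device-property API. A condensed, hedged sketch of the count/allocate/read pattern for a string-array property, with a hypothetical function name reading the same "channels" property:

	#include <linux/device.h>
	#include <linux/property.h>
	#include <linux/slab.h>

	static int ex_read_labels(struct device *dev)
	{
		const char **labels;
		int n, ret, i;

		n = device_property_string_array_count(dev, "channels");
		if (n < 0)
			return n;

		labels = devm_kmalloc_array(dev, n, sizeof(*labels), GFP_KERNEL);
		if (!labels)
			return -ENOMEM;

		ret = device_property_read_string_array(dev, "channels", labels, n);
		if (ret < 0)
			return ret;

		for (i = 0; i < n; i++)
			if (*labels[i])	/* empty strings mark unused states */
				dev_dbg(dev, "channel %d: %s\n", i, labels[i]);

		return 0;
	}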
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5b93933a2e27..76913a2028d2 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -560,16 +560,12 @@ static int st_press_write_raw(struct iio_dev *indio_dev,
int val2,
long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
return -EINVAL;
}
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index ad4b1fb2607a..0bca5f74de68 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -10,12 +10,14 @@
* https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/of_irq.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/property.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -209,7 +211,7 @@ static int mb1232_probe(struct i2c_client *client,
init_completion(&data->ranging);
- data->irqnr = irq_of_parse_and_map(dev->of_node, 0);
+ data->irqnr = fwnode_irq_get(dev_fwnode(&client->dev), 0);
if (data->irqnr <= 0) {
/* usage of interrupt is optional */
data->irqnr = -1;
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 24a97d41e115..d56e037378de 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -29,9 +29,8 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/sched.h>
@@ -288,7 +287,7 @@ static int ping_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = dev;
- data->cfg = of_device_get_match_data(dev);
+ data->cfg = device_get_match_data(dev);
mutex_init(&data->lock);
init_completion(&data->rising);
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 661a79ea200d..a284b20529fb 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
u16 tries = 20;
u8 buffer[12];
int ret;
+ unsigned long time_left;
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
if (ret < 0)
@@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
if (data->client->irq) {
reinit_completion(&data->completion);
- ret = wait_for_completion_timeout(&data->completion, HZ/10);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ time_left = wait_for_completion_timeout(&data->completion, HZ/10);
+ if (time_left == 0)
return -ETIMEDOUT;
vl53l0x_clear_irq(data);
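
The fix above matters because wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the remaining jiffies), so the old ret < 0 test could never trigger. A hedged sketch of the corrected pattern, with a hypothetical wrapper:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static int ex_wait_for_irq(struct completion *done)
	{
		unsigned long time_left;

		/* 0 means the timeout elapsed; non-zero is the time left. */
		time_left = wait_for_completion_timeout(done, HZ / 10);
		if (!time_left)
			return -ETIMEDOUT;

		return 0;
	}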
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 301c3f13fb26..4fc654275155 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -12,11 +12,15 @@
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
/* register map */
#define LTC2983_STATUS_REG 0x0000
#define LTC2983_TEMP_RES_START_REG 0x0010
@@ -219,7 +223,7 @@ struct ltc2983_sensor {
struct ltc2983_custom_sensor {
/* raw table sensor data */
- u8 *table;
+ void *table;
size_t size;
/* address offset */
s8 offset;
@@ -377,25 +381,25 @@ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
}
-static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
- struct ltc2983_data *st,
- const struct device_node *np,
- const char *propname,
- const bool is_steinhart,
- const u32 resolution,
- const bool has_signed)
+static struct ltc2983_custom_sensor *
+__ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle *fn,
+ const char *propname, const bool is_steinhart,
+ const u32 resolution, const bool has_signed)
{
struct ltc2983_custom_sensor *new_custom;
- u8 index, n_entries, tbl = 0;
struct device *dev = &st->spi->dev;
/*
* For custom steinhart, the full u32 is taken. For all the others
* the MSB is discarded.
*/
const u8 n_size = is_steinhart ? 4 : 3;
- const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+ u8 index, n_entries;
+ int ret;
- n_entries = of_property_count_elems_of_size(np, propname, e_size);
+ if (is_steinhart)
+ n_entries = fwnode_property_count_u32(fn, propname);
+ else
+ n_entries = fwnode_property_count_u64(fn, propname);
/* n_entries must be an even number */
if (!n_entries || (n_entries % 2) != 0) {
dev_err(dev, "Number of entries either 0 or not even\n");
@@ -409,8 +413,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
new_custom->size = n_entries * n_size;
/* check Steinhart size */
if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
- dev_err(dev, "Steinhart sensors size(%zu) must be 24",
- new_custom->size);
+ dev_err(dev, "Steinhart sensors size(%zu) must be %u\n", new_custom->size,
+ LTC2983_CUSTOM_STEINHART_SIZE);
return ERR_PTR(-EINVAL);
}
/* Check space on the table. */
@@ -423,21 +427,33 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
}
/* allocate the table */
- new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+ if (is_steinhart)
+ new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u32), GFP_KERNEL);
+ else
+ new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u64), GFP_KERNEL);
if (!new_custom->table)
return ERR_PTR(-ENOMEM);
- for (index = 0; index < n_entries; index++) {
- u64 temp = 0, j;
- /*
- * Steinhart sensors are configured with raw values in the
- * devicetree. For the other sensors we must convert the
- * value to raw. The odd index's correspond to temperarures
- * and always have 1/1024 of resolution. Temperatures also
- * come in kelvin, so signed values is not possible
- */
- if (!is_steinhart) {
- of_property_read_u64_index(np, propname, index, &temp);
+ /*
+ * Steinhart sensors are configured with raw values in the firmware
+ * node. For the other sensors we must convert the value to raw.
+ * The odd indices correspond to temperatures and always have 1/1024
+ * of resolution. Temperatures also come in Kelvin, so signed values
+ * are not possible.
+ */
+ if (is_steinhart) {
+ ret = fwnode_property_read_u32_array(fn, propname, new_custom->table, n_entries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ cpu_to_be32_array(new_custom->table, new_custom->table, n_entries);
+ } else {
+ ret = fwnode_property_read_u64_array(fn, propname, new_custom->table, n_entries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ for (index = 0; index < n_entries; index++) {
+ u64 temp = ((u64 *)new_custom->table)[index];
if ((index % 2) != 0)
temp = __convert_to_raw(temp, 1024);
@@ -445,16 +461,9 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
temp = __convert_to_raw_sign(temp, resolution);
else
temp = __convert_to_raw(temp, resolution);
- } else {
- u32 t32;
- of_property_read_u32_index(np, propname, index, &t32);
- temp = t32;
+ put_unaligned_be24(temp, new_custom->table + index * 3);
}
-
- for (j = 0; j < n_size; j++)
- new_custom->table[tbl++] =
- temp >> (8 * (n_size - j - 1));
}
new_custom->is_steinhart = is_steinhart;
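
For the non-Steinhart branch above, each converted entry is truncated to 24 bits and packed big-endian at a 3-byte stride. A small hedged illustration of that packing, with hypothetical names:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Hypothetical: pack n 24-bit entries, big-endian, back to back. */
	static void ex_pack_be24(u8 *table, const u64 *vals, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			put_unaligned_be24((u32)vals[i], table + i * 3);
		/* e.g. vals[0] == 0x012345 lands as bytes 0x01 0x23 0x45 */
	}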
@@ -597,13 +606,12 @@ static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
return __ltc2983_chan_assign_common(st, sensor, chan_val);
}
-static struct ltc2983_sensor *ltc2983_thermocouple_new(
- const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_thermocouple *thermo;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 oc_current;
int ret;
@@ -611,11 +619,10 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
if (!thermo)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
- ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
- &oc_current);
+ ret = fwnode_property_read_u32(child, "adi,sensor-oc-current-microamp", &oc_current);
if (!ret) {
switch (oc_current) {
case 10:
@@ -651,20 +658,18 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
return ERR_PTR(-EINVAL);
}
- phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
- if (phandle) {
- int ret;
-
- ret = of_property_read_u32(phandle, "reg",
- &thermo->cold_junction_chan);
+ ref = fwnode_find_reference(child, "adi,cold-junction-handle", 0);
+ if (IS_ERR(ref)) {
+ ref = NULL;
+ } else {
+ ret = fwnode_property_read_u32(ref, "reg", &thermo->cold_junction_chan);
if (ret) {
/*
* This would be caught later but we can just return
* the error right away.
*/
dev_err(&st->spi->dev, "Property reg must be given\n");
- of_node_put(phandle);
- return ERR_PTR(-EINVAL);
+ goto fail;
}
}
@@ -676,8 +681,8 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
propname, false,
16384, true);
if (IS_ERR(thermo->custom)) {
- of_node_put(phandle);
- return ERR_CAST(thermo->custom);
+ ret = PTR_ERR(thermo->custom);
+ goto fail;
}
}
@@ -685,37 +690,41 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &thermo->sensor;
+
+fail:
+ fwnode_handle_put(ref);
+ return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_rtd *rtd;
int ret = 0;
struct device *dev = &st->spi->dev;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 excitation_current = 0, n_wires = 0;
rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
if (!rtd)
return ERR_PTR(-ENOMEM);
- phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
- if (!phandle) {
+ ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+ if (IS_ERR(ref)) {
dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_PTR(-EINVAL);
+ return ERR_CAST(ref);
}
- ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+ ret = fwnode_property_read_u32(ref, "reg", &rtd->r_sense_chan);
if (ret) {
dev_err(dev, "Property reg must be given\n");
goto fail;
}
- ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+ ret = fwnode_property_read_u32(child, "adi,number-of-wires", &n_wires);
if (!ret) {
switch (n_wires) {
case 2:
@@ -738,9 +747,9 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
}
}
- if (of_property_read_bool(child, "adi,rsense-share")) {
+ if (fwnode_property_read_bool(child, "adi,rsense-share")) {
/* Current rotation is only available with rsense sharing */
- if (of_property_read_bool(child, "adi,current-rotate")) {
+ if (fwnode_property_read_bool(child, "adi,current-rotate")) {
if (n_wires == 2 || n_wires == 3) {
dev_err(dev,
"Rotation not allowed for 2/3 Wire RTDs");
@@ -803,8 +812,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
"adi,custom-rtd",
false, 2048, false);
if (IS_ERR(rtd->custom)) {
- of_node_put(phandle);
- return ERR_CAST(rtd->custom);
+ ret = PTR_ERR(rtd->custom);
+ goto fail;
}
}
@@ -812,8 +821,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
rtd->sensor.fault_handler = ltc2983_common_fault_handler;
rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-microamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
if (ret) {
/* default to 5uA */
rtd->excitation_current = 1;
@@ -852,23 +861,22 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
}
}
- of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+ fwnode_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &rtd->sensor;
fail:
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_thermistor_new(
- const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_thermistor *thermistor;
struct device *dev = &st->spi->dev;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 excitation_current = 0;
int ret = 0;
@@ -876,23 +884,23 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
if (!thermistor)
return ERR_PTR(-ENOMEM);
- phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
- if (!phandle) {
+ ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+ if (IS_ERR(ref)) {
dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_PTR(-EINVAL);
+ return ERR_CAST(ref);
}
- ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+ ret = fwnode_property_read_u32(ref, "reg", &thermistor->r_sense_chan);
if (ret) {
dev_err(dev, "rsense channel must be configured...\n");
goto fail;
}
- if (of_property_read_bool(child, "adi,single-ended")) {
+ if (fwnode_property_read_bool(child, "adi,single-ended")) {
thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
- } else if (of_property_read_bool(child, "adi,rsense-share")) {
+ } else if (fwnode_property_read_bool(child, "adi,rsense-share")) {
/* rotation is only possible if sharing rsense */
- if (of_property_read_bool(child, "adi,current-rotate"))
+ if (fwnode_property_read_bool(child, "adi,current-rotate"))
thermistor->sensor_config =
LTC2983_THERMISTOR_C_ROTATE(1);
else
@@ -926,16 +934,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
steinhart,
64, false);
if (IS_ERR(thermistor->custom)) {
- of_node_put(phandle);
- return ERR_CAST(thermistor->custom);
+ ret = PTR_ERR(thermistor->custom);
+ goto fail;
}
}
/* set common parameters */
thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-nanoamp",
+ &excitation_current);
if (ret) {
/* Auto range is not allowed for custom sensors */
if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
@@ -999,17 +1007,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
}
}
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &thermistor->sensor;
fail:
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_diode_new(
- const struct device_node *child,
- const struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_diode *diode;
u32 temp = 0, excitation_current = 0;
@@ -1019,13 +1026,13 @@ static struct ltc2983_sensor *ltc2983_diode_new(
if (!diode)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
diode->sensor_config = LTC2983_DIODE_SGL(1);
- if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+ if (fwnode_property_read_bool(child, "adi,three-conversion-cycles"))
diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
- if (of_property_read_bool(child, "adi,average-on"))
+ if (fwnode_property_read_bool(child, "adi,average-on"))
diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
/* validate channel index */
@@ -1040,8 +1047,8 @@ static struct ltc2983_sensor *ltc2983_diode_new(
diode->sensor.fault_handler = ltc2983_common_fault_handler;
diode->sensor.assign_chan = ltc2983_diode_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-microamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
if (!ret) {
switch (excitation_current) {
case 10:
@@ -1064,7 +1071,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
}
}
- of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+ fwnode_property_read_u32(child, "adi,ideal-factor-value", &temp);
/* 2^20 resolution */
diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
@@ -1072,7 +1079,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
return &diode->sensor;
}
-static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct fwnode_handle *child,
struct ltc2983_data *st,
const struct ltc2983_sensor *sensor)
{
@@ -1091,7 +1098,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
return ERR_PTR(-EINVAL);
}
- ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+ ret = fwnode_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
if (ret) {
dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
return ERR_PTR(-EINVAL);
@@ -1110,7 +1117,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
return &rsense->sensor;
}
-static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child,
struct ltc2983_data *st,
const struct ltc2983_sensor *sensor)
{
@@ -1120,7 +1127,7 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
if (!adc)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
adc->single_ended = true;
if (!adc->single_ended &&
@@ -1264,17 +1271,15 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data)
static int ltc2983_parse_dt(struct ltc2983_data *st)
{
- struct device_node *child;
struct device *dev = &st->spi->dev;
+ struct fwnode_handle *child;
int ret = 0, chan = 0, channel_avail_mask = 0;
- of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
- &st->mux_delay_config);
+ device_property_read_u32(dev, "adi,mux-delay-config-us", &st->mux_delay_config);
- of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
- &st->filter_notch_freq);
+ device_property_read_u32(dev, "adi,filter-notch-freq", &st->filter_notch_freq);
- st->num_channels = of_get_available_child_count(dev->of_node);
+ st->num_channels = device_get_child_node_count(dev);
if (!st->num_channels) {
dev_err(&st->spi->dev, "At least one channel must be given!");
return -EINVAL;
@@ -1286,10 +1291,10 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
return -ENOMEM;
st->iio_channels = st->num_channels;
- for_each_available_child_of_node(dev->of_node, child) {
+ device_for_each_child_node(dev, child) {
struct ltc2983_sensor sensor;
- ret = of_property_read_u32(child, "reg", &sensor.chan);
+ ret = fwnode_property_read_u32(child, "reg", &sensor.chan);
if (ret) {
dev_err(dev, "reg property must given for child nodes\n");
goto put_child;
@@ -1299,8 +1304,8 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
sensor.chan > LTC2983_MAX_CHANNELS_NR) {
ret = -EINVAL;
- dev_err(dev,
- "chan:%d must be from 1 to 20\n", sensor.chan);
+ dev_err(dev, "chan:%d must be from %u to %u\n", sensor.chan,
+ LTC2983_MIN_CHANNELS_NR, LTC2983_MAX_CHANNELS_NR);
goto put_child;
} else if (channel_avail_mask & BIT(sensor.chan)) {
ret = -EINVAL;
@@ -1308,8 +1313,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
goto put_child;
}
- ret = of_property_read_u32(child, "adi,sensor-type",
- &sensor.type);
+ ret = fwnode_property_read_u32(child, "adi,sensor-type", &sensor.type);
if (ret) {
dev_err(dev,
"adi,sensor-type property must given for child nodes\n");
@@ -1363,7 +1367,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
return 0;
put_child:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index 54840881259a..8307aae2cb45 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -7,9 +7,11 @@
*/
#include <linux/ctype.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -422,9 +424,7 @@ static int max31856_probe(struct spi_device *spi)
indio_dev->channels = max31856_channels;
indio_dev->num_channels = ARRAY_SIZE(max31856_channels);
- ret = of_property_read_u32(spi->dev.of_node, "thermocouple-type",
- &data->thermocouple_type);
-
+ ret = device_property_read_u32(&spi->dev, "thermocouple-type", &data->thermocouple_type);
if (ret) {
dev_info(&spi->dev,
"Could not read thermocouple type DT property, configuring as a K-Type\n");
diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c
index 86c3f3509a26..e3bb78184c6e 100644
--- a/drivers/iio/temperature/max31865.c
+++ b/drivers/iio/temperature/max31865.c
@@ -12,9 +12,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -305,7 +307,7 @@ static int max31865_probe(struct spi_device *spi)
indio_dev->channels = max31865_channels;
indio_dev->num_channels = ARRAY_SIZE(max31865_channels);
- if (of_property_read_bool(spi->dev.of_node, "maxim,3-wire")) {
+ if (device_property_read_bool(&spi->dev, "maxim,3-wire")) {
/* select 3 wire */
data->three_wire = 1;
} else {
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 2a4b75897910..f1a8704e6cc1 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -176,16 +176,15 @@ out1:
static int iio_sysfs_trigger_remove(int id)
{
- bool foundit = false;
- struct iio_sysfs_trig *t;
+ struct iio_sysfs_trig *t = NULL, *iter;
mutex_lock(&iio_sysfs_trig_list_mut);
- list_for_each_entry(t, &iio_sysfs_trig_list, l)
- if (id == t->id) {
- foundit = true;
+ list_for_each_entry(iter, &iio_sysfs_trig_list, l)
+ if (id == iter->id) {
+ t = iter;
break;
}
- if (!foundit) {
+ if (!t) {
mutex_unlock(&iio_sysfs_trig_list_mut);
return -EINVAL;
}
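This is the usual fix for relying on a list iterator after the loop: a dedicated iterator variable walks the list and a result pointer is set only on a match, so nothing past the loop ever touches the iterator. A generic sketch of that shape, with made-up names (struct item, item_list, find_item):

#include <linux/list.h>

struct item {
	int id;
	struct list_head l;
};

static LIST_HEAD(item_list);

static struct item *find_item(int id)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &item_list, l) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}

	/* NULL means no entry matched; 'iter' is never used past the loop */
	return found;
}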
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 505a032e2786..9dcf3f51f2dd 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -402,6 +402,7 @@ config JOYSTICK_N64
config JOYSTICK_SENSEHAT
tristate "Raspberry Pi Sense HAT joystick"
depends on INPUT && I2C
+ depends on HAS_IOMEM
select MFD_SIMPLE_MFD_I2C
help
Say Y here if you want to enable the driver for the
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 8a36d78fed63..946bf75aa106 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -639,7 +639,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
return -ENOMEM;
pipe = usb_rcvintpipe(udev, ar2->ep[i]->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = maxp > 4 ? 4 : maxp;
usb_fill_int_urb(ar2->urb[i], udev, pipe, ar2->buf[i], maxp,
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index f515fae465c3..728325a2d574 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -745,7 +745,7 @@ static int cm109_usb_probe(struct usb_interface *intf,
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ ret = usb_maxpacket(udev, pipe);
if (ret != USB_PKT_LEN)
dev_err(&intf->dev, "invalid payload size %d, expected %d\n",
ret, USB_PKT_LEN);
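The input drivers in this series are adjusted to the two-argument usb_maxpacket(): the direction is already encoded in the pipe, which made the old usb_pipeout() argument redundant. A minimal sketch, with a hypothetical endpoint address:

#include <linux/usb.h>

/* Returns wMaxPacketSize for an interrupt-IN endpoint (0x81 is only an example). */
static u16 example_int_in_maxpacket(struct usb_device *udev)
{
	unsigned int pipe = usb_rcvintpipe(udev, 0x81);

	return usb_maxpacket(udev, pipe);
}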
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index c4e0e1886061..c1c733a9cb89 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -374,7 +374,7 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
if (maxp < POWERMATE_PAYLOAD_SIZE_MIN || maxp > POWERMATE_PAYLOAD_SIZE_MAX) {
printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n",
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cbb1599a520e..480476121c01 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
{
/*
- * Lenovo Yoga Tab2 1051L, something messes with the home-button
+ * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
* IRQ settings, leading to a non working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 1fc9b3e7007f..8d8ebdc2039b 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -481,7 +481,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
error_evtchan:
xenbus_free_evtchn(dev, evtchn);
error_grant:
- gnttab_end_foreign_access(info->gref, 0UL);
+ gnttab_end_foreign_access(info->gref, NULL);
info->gref = -1;
return ret;
}
@@ -492,7 +492,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
if (info->gref >= 0)
- gnttab_end_foreign_access(info->gref, 0UL);
+ gnttab_end_foreign_access(info->gref, NULL);
info->gref = -1;
}
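In this hunk the second argument of gnttab_end_foreign_access() is now a struct page pointer; passing NULL ends the grant without handing a page back to be freed. A tiny sketch, assuming only that signature:

#include <xen/grant_table.h>

/* End a foreign-access grant without freeing a backing page. */
static void example_drop_grant(grant_ref_t ref)
{
	gnttab_end_foreign_access(ref, NULL);
}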
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 8ab01c7601b1..69420781db30 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -905,7 +905,7 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ ret = usb_maxpacket(udev, pipe);
if (ret != USB_PKT_LEN)
dev_err(&intf->dev, "invalid payload size %d, expected %zd\n",
ret, USB_PKT_LEN);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 59a14505b9cd..ca150618d32f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_data)
goto err_free_bt_buffer;
- if (dev->bt_urb)
+ if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
+ dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ }
+
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
+ dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));
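The bcm5974 buffers come from usb_alloc_coherent(), so the newly added URB_NO_TRANSFER_DMA_MAP tells the USB core not to map them a second time. A sketch of that pairing, with an illustrative helper name and parameters:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_setup_int_urb(struct usb_device *udev, struct urb *urb,
				 unsigned int ep, int len,
				 usb_complete_t complete_fn, void *ctx)
{
	void *buf = usb_alloc_coherent(udev, len, GFP_KERNEL,
				       &urb->transfer_dma);

	if (!buf)
		return -ENOMEM;

	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep),
			 buf, len, complete_fn, ctx, 1);
	/* transfer_dma is already set; tell the core not to map the buffer again */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	return 0;
}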
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 3332b77eef2a..f04ba12dbfa8 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mouse-pxa930_trkball.h>
/* Trackball Controller Register Definitions */
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index a38d1fe97334..56c7e471ac32 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -130,7 +130,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 749edbdb7ffa..c608ac505d1b 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -296,7 +296,7 @@ static int pegasus_probe(struct usb_interface *intf,
pegasus->intf = intf;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- pegasus->data_len = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ pegasus->data_len = usb_maxpacket(dev, pipe);
pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 43c7d6e5bdc0..2d70c945b20a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -902,6 +902,7 @@ config TOUCHSCREEN_WM9713
config TOUCHSCREEN_WM97XX_MAINSTONE
tristate "WM97xx Mainstone/Palm accelerated touch"
depends on TOUCHSCREEN_WM97XX && ARCH_PXA
+ depends on SND_PXA2XX_LIB_AC97
help
Say Y here for support for streaming mode with WM97xx touchscreens
on Mainstone, Palm Tungsten T5, TX and LifeDrive systems.
@@ -914,6 +915,7 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
config TOUCHSCREEN_WM97XX_ZYLONITE
tristate "Zylonite accelerated touch"
depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
+ depends on SND_PXA2XX_LIB_AC97
select TOUCHSCREEN_WM9713
help
Say Y here for support for streaming mode with the touchscreen
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index f8564b398eb3..c39f49720fe4 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -21,13 +21,14 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/wm97xx.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/wm97xx.h>
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
#include <asm/mach-types.h>
@@ -41,24 +42,23 @@ struct continuous {
#define WM_READS(sp) ((sp / HZ) + 1)
static const struct continuous cinfo[] = {
- {WM9705_ID2, 0, WM_READS(94), 94},
- {WM9705_ID2, 1, WM_READS(188), 188},
- {WM9705_ID2, 2, WM_READS(375), 375},
- {WM9705_ID2, 3, WM_READS(750), 750},
- {WM9712_ID2, 0, WM_READS(94), 94},
- {WM9712_ID2, 1, WM_READS(188), 188},
- {WM9712_ID2, 2, WM_READS(375), 375},
- {WM9712_ID2, 3, WM_READS(750), 750},
- {WM9713_ID2, 0, WM_READS(94), 94},
- {WM9713_ID2, 1, WM_READS(120), 120},
- {WM9713_ID2, 2, WM_READS(154), 154},
- {WM9713_ID2, 3, WM_READS(188), 188},
+ { WM9705_ID2, 0, WM_READS(94), 94 },
+ { WM9705_ID2, 1, WM_READS(188), 188 },
+ { WM9705_ID2, 2, WM_READS(375), 375 },
+ { WM9705_ID2, 3, WM_READS(750), 750 },
+ { WM9712_ID2, 0, WM_READS(94), 94 },
+ { WM9712_ID2, 1, WM_READS(188), 188 },
+ { WM9712_ID2, 2, WM_READS(375), 375 },
+ { WM9712_ID2, 3, WM_READS(750), 750 },
+ { WM9713_ID2, 0, WM_READS(94), 94 },
+ { WM9713_ID2, 1, WM_READS(120), 120 },
+ { WM9713_ID2, 2, WM_READS(154), 154 },
+ { WM9713_ID2, 3, WM_READS(188), 188 },
};
/* continuous speed index */
static int sp_idx;
-static u16 last, tries;
-static int irq;
+static struct gpio_desc *gpiod_irq;
/*
* Pen sampling frequency (Hz) in continuous mode.
@@ -97,44 +97,40 @@ MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
/* flush AC97 slot 5 FIFO on pxa machines */
-#ifdef CONFIG_PXA27x
-static void wm97xx_acc_pen_up(struct wm97xx *wm)
-{
- schedule_timeout_uninterruptible(1);
-
- while (MISR & (1 << 2))
- MODR;
-}
-#else
static void wm97xx_acc_pen_up(struct wm97xx *wm)
{
unsigned int count;
- schedule_timeout_uninterruptible(1);
+ msleep(1);
- for (count = 0; count < 16; count++)
- MODR;
+ if (cpu_is_pxa27x()) {
+ while (pxa2xx_ac97_read_misr() & (1 << 2))
+ pxa2xx_ac97_read_modr();
+ } else if (cpu_is_pxa3xx()) {
+ for (count = 0; count < 16; count++)
+ pxa2xx_ac97_read_modr();
+ }
}
-#endif
static int wm97xx_acc_pen_down(struct wm97xx *wm)
{
u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
int reads = 0;
+ static u16 last, tries;
/* When the AC97 queue has been drained we need to allow time
* to buffer up samples otherwise we end up spinning polling
* for samples. The controller can't have a suitably low
* threshold set to use the notifications it gives.
*/
- schedule_timeout_uninterruptible(1);
+ msleep(1);
if (tries > 5) {
tries = 0;
return RC_PENUP;
}
- x = MODR;
+ x = pxa2xx_ac97_read_modr();
if (x == last) {
tries++;
return RC_AGAIN;
@@ -142,10 +138,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
last = x;
do {
if (reads)
- x = MODR;
- y = MODR;
+ x = pxa2xx_ac97_read_modr();
+ y = pxa2xx_ac97_read_modr();
if (pressure)
- p = MODR;
+ p = pxa2xx_ac97_read_modr();
dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
x, y, p);
@@ -194,28 +190,23 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
/* IRQ driven touchscreen is used on Palm hardware */
if (machine_is_palmt5() || machine_is_palmtx() || machine_is_palmld()) {
pen_int = 1;
- irq = 27;
/* There is some obscure mutant of WM9712 interbred with WM9713
* used on Palm HW */
wm->variant = WM97xx_WM1613;
- } else if (machine_is_mainstone() && pen_int)
- irq = 4;
-
- if (irq) {
- ret = gpio_request(irq, "Touchscreen IRQ");
- if (ret)
- goto out;
-
- ret = gpio_direction_input(irq);
- if (ret) {
- gpio_free(irq);
- goto out;
- }
+ } else if (machine_is_zylonite()) {
+ pen_int = 1;
+ }
- wm->pen_irq = gpio_to_irq(irq);
+ if (pen_int) {
+ gpiod_irq = gpiod_get(wm->dev, "touch", GPIOD_IN);
+ if (IS_ERR(gpiod_irq))
+ pen_int = 0;
+ }
+
+ if (pen_int) {
+ wm->pen_irq = gpiod_to_irq(gpiod_irq);
irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
- } else /* pen irq not supported */
- pen_int = 0;
+ }
/* codec specific irq config */
if (pen_int) {
@@ -242,7 +233,6 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
}
}
-out:
return ret;
}
@@ -250,28 +240,19 @@ static void wm97xx_acc_shutdown(struct wm97xx *wm)
{
/* codec specific deconfig */
if (pen_int) {
- if (irq)
- gpio_free(irq);
+ if (gpiod_irq)
+ gpiod_put(gpiod_irq);
wm->pen_irq = 0;
}
}
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
- if (enable)
- enable_irq(wm->pen_irq);
- else
- disable_irq_nosync(wm->pen_irq);
-}
-
static struct wm97xx_mach_ops mainstone_mach_ops = {
- .acc_enabled = 1,
- .acc_pen_up = wm97xx_acc_pen_up,
- .acc_pen_down = wm97xx_acc_pen_down,
- .acc_startup = wm97xx_acc_startup,
- .acc_shutdown = wm97xx_acc_shutdown,
- .irq_enable = wm97xx_irq_enable,
- .irq_gpio = WM97XX_GPIO_2,
+ .acc_enabled = 1,
+ .acc_pen_up = wm97xx_acc_pen_up,
+ .acc_pen_down = wm97xx_acc_pen_down,
+ .acc_startup = wm97xx_acc_startup,
+ .acc_shutdown = wm97xx_acc_shutdown,
+ .irq_gpio = WM97XX_GPIO_2,
};
static int mainstone_wm97xx_probe(struct platform_device *pdev)
@@ -286,14 +267,15 @@ static int mainstone_wm97xx_remove(struct platform_device *pdev)
struct wm97xx *wm = platform_get_drvdata(pdev);
wm97xx_unregister_mach_ops(wm);
+
return 0;
}
static struct platform_driver mainstone_wm97xx_driver = {
- .probe = mainstone_wm97xx_probe,
- .remove = mainstone_wm97xx_remove,
- .driver = {
- .name = "wm97xx-touch",
+ .probe = mainstone_wm97xx_probe,
+ .remove = mainstone_wm97xx_remove,
+ .driver = {
+ .name = "wm97xx-touch",
},
};
module_platform_driver(mainstone_wm97xx_driver);
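Both wm97xx board drivers now obtain the pen-down line as a GPIO descriptor instead of a hard-coded GPIO number. A minimal sketch of that lookup, assuming the same "touch" con_id the drivers request; the helper name is illustrative:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/irq.h>

static int example_get_pen_irq(struct device *dev)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(dev, "touch", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);

	return irq;
}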
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 1b58611c8084..2757c7768ffe 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -285,11 +285,12 @@ void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode)
EXPORT_SYMBOL_GPL(wm97xx_set_suspend_mode);
/*
- * Handle a pen down interrupt.
+ * Codec PENDOWN irq handler
+ *
*/
-static void wm97xx_pen_irq_worker(struct work_struct *work)
+static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
{
- struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work);
+ struct wm97xx *wm = dev_id;
int pen_was_down = wm->pen_is_down;
/* do we need to enable the touch panel reader */
@@ -343,27 +344,6 @@ static void wm97xx_pen_irq_worker(struct work_struct *work)
if (!wm->pen_is_down && wm->mach_ops->acc_enabled)
wm->mach_ops->acc_pen_up(wm);
- wm->mach_ops->irq_enable(wm, 1);
-}
-
-/*
- * Codec PENDOWN irq handler
- *
- * We have to disable the codec interrupt in the handler because it
- * can take up to 1ms to clear the interrupt source. We schedule a task
- * in a work queue to do the actual interaction with the chip. The
- * interrupt is then enabled again in the slow handler when the source
- * has been cleared.
- */
-static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
-{
- struct wm97xx *wm = dev_id;
-
- if (!work_pending(&wm->pen_event_work)) {
- wm->mach_ops->irq_enable(wm, 0);
- queue_work(wm->ts_workq, &wm->pen_event_work);
- }
-
return IRQ_HANDLED;
}
@@ -374,12 +354,9 @@ static int wm97xx_init_pen_irq(struct wm97xx *wm)
{
u16 reg;
- /* If an interrupt is supplied an IRQ enable operation must also be
- * provided. */
- BUG_ON(!wm->mach_ops->irq_enable);
-
- if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, IRQF_SHARED,
- "wm97xx-pen", wm)) {
+ if (request_threaded_irq(wm->pen_irq, NULL, wm97xx_pen_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "wm97xx-pen", wm)) {
dev_err(wm->dev,
"Failed to register pen down interrupt, polling");
wm->pen_irq = 0;
@@ -509,7 +486,6 @@ static int wm97xx_ts_input_open(struct input_dev *idev)
wm->codec->dig_enable(wm, 1);
INIT_DELAYED_WORK(&wm->ts_reader, wm97xx_ts_reader);
- INIT_WORK(&wm->pen_event_work, wm97xx_pen_irq_worker);
wm->ts_reader_min_interval = HZ >= 100 ? HZ / 100 : 1;
if (wm->ts_reader_min_interval < 1)
@@ -560,10 +536,6 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
wm->pen_is_down = 0;
- /* Balance out interrupt disables/enables */
- if (cancel_work_sync(&wm->pen_event_work))
- wm->mach_ops->irq_enable(wm, 1);
-
/* ts_reader rearms itself so we need to explicitly stop it
* before we destroy the workqueue.
*/
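wm97xx-core replaces its private workqueue deferral with a threaded interrupt: with no primary handler and IRQF_ONESHOT the line stays masked until the thread function returns, so the slow codec accesses can live directly in the handler. A simplified sketch with illustrative names (IRQF_SHARED omitted for brevity):

#include <linux/interrupt.h>

static irqreturn_t example_pen_thread(int irq, void *dev_id)
{
	/* slow bus traffic (codec reads, etc.) is fine in thread context */
	return IRQ_HANDLED;
}

static int example_request_pen_irq(struct device *dev, int irq, void *priv)
{
	/* NULL primary handler + IRQF_ONESHOT: the irq stays masked until the
	 * thread returns, replacing the old disable/queue_work/enable dance */
	return devm_request_threaded_irq(dev, irq, NULL, example_pen_thread,
					 IRQF_ONESHOT, "example-pen", priv);
}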
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 0f4ac7f844ce..a70fe4abe520 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -17,15 +17,14 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/wm97xx.h>
-#include <mach/hardware.h>
-#include <mach/mfp.h>
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
struct continuous {
u16 id; /* codec id */
@@ -80,7 +79,7 @@ static void wm97xx_acc_pen_up(struct wm97xx *wm)
msleep(1);
for (i = 0; i < 16; i++)
- MODR;
+ pxa2xx_ac97_read_modr();
}
static int wm97xx_acc_pen_down(struct wm97xx *wm)
@@ -101,7 +100,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
return RC_PENUP;
}
- x = MODR;
+ x = pxa2xx_ac97_read_modr();
if (x == last) {
tries++;
return RC_AGAIN;
@@ -109,10 +108,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
last = x;
do {
if (reads)
- x = MODR;
- y = MODR;
+ x = pxa2xx_ac97_read_modr();
+ y = pxa2xx_ac97_read_modr();
if (pressure)
- p = MODR;
+ p = pxa2xx_ac97_read_modr();
dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
x, y, p);
@@ -161,34 +160,28 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
return 0;
}
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
- if (enable)
- enable_irq(wm->pen_irq);
- else
- disable_irq_nosync(wm->pen_irq);
-}
-
static struct wm97xx_mach_ops zylonite_mach_ops = {
.acc_enabled = 1,
.acc_pen_up = wm97xx_acc_pen_up,
.acc_pen_down = wm97xx_acc_pen_down,
.acc_startup = wm97xx_acc_startup,
- .irq_enable = wm97xx_irq_enable,
.irq_gpio = WM97XX_GPIO_2,
};
static int zylonite_wm97xx_probe(struct platform_device *pdev)
{
struct wm97xx *wm = platform_get_drvdata(pdev);
- int gpio_touch_irq;
-
- if (cpu_is_pxa320())
- gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15);
- else
- gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
+ struct gpio_desc *gpio_touch_irq;
+ int err;
+
+ gpio_touch_irq = devm_gpiod_get(&pdev->dev, "touch", GPIOD_IN);
+ err = PTR_ERR_OR_ZERO(gpio_touch_irq);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get irq gpio: %d\n", err);
+ return err;
+ }
- wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+ wm->pen_irq = gpiod_to_irq(gpio_touch_irq);
irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 91353e651a52..22adff5d7f53 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -110,6 +110,15 @@ config INTERCONNECT_QCOM_SC8180X
This is a driver for the Qualcomm Network-on-Chip on sc8180x-based
platforms.
+config INTERCONNECT_QCOM_SC8280XP
+ tristate "Qualcomm SC8280XP interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SC8280XP-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM660
tristate "Qualcomm SDM660 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -137,6 +146,15 @@ config INTERCONNECT_QCOM_SDX55
This is a driver for the Qualcomm Network-on-Chip on sdx55-based
platforms.
+config INTERCONNECT_QCOM_SDX65
+ tristate "Qualcomm SDX65 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdx65-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index ceae9bb566c6..8d1fe9d38ac3 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -12,9 +12,11 @@ icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
qnoc-sc7280-objs := sc7280.o
qnoc-sc8180x-objs := sc8180x.o
+qnoc-sc8280xp-objs := sc8280xp.o
qnoc-sdm660-objs := sdm660.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
+qnoc-sdx65-objs := sdx65.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@@ -33,9 +35,11 @@ obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC8280XP) += qnoc-sc8280xp.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 34125e8f8b60..fb013191c29b 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -274,20 +274,19 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
do_div(rate, qn->buswidth);
rate = min_t(u64, rate, LONG_MAX);
- if (qn->rate == rate)
- return 0;
-
for (i = 0; i < qp->num_clks; i++) {
+ if (qp->bus_clk_rate[i] == rate)
+ continue;
+
ret = clk_set_rate(qp->bus_clks[i].clk, rate);
if (ret) {
pr_err("%s clk_set_rate error: %d\n",
qp->bus_clks[i].id, ret);
return ret;
}
+ qp->bus_clk_rate[i] = rate;
}
- qn->rate = rate;
-
return 0;
}
@@ -301,7 +300,7 @@ int qnoc_probe(struct platform_device *pdev)
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
+ struct qcom_icc_node * const *qnodes;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
@@ -332,6 +331,11 @@ int qnoc_probe(struct platform_device *pdev)
if (!qp)
return -ENOMEM;
+ qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
+ GFP_KERNEL);
+ if (!qp->bus_clk_rate)
+ return -ENOMEM;
+
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index 26dad006034f..ebee9009301e 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -26,6 +26,7 @@ enum qcom_icc_type {
* @type: the ICC provider type
* @qos_offset: offset to QoS registers
* @regmap: regmap for QoS registers read/write access
+ * @bus_clk_rate: bus clock rate in Hz
*/
struct qcom_icc_provider {
struct icc_provider provider;
@@ -33,6 +34,7 @@ struct qcom_icc_provider {
enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
+ u64 *bus_clk_rate;
struct clk_bulk_data bus_clks[];
};
@@ -66,7 +68,6 @@ struct qcom_icc_qos {
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
- * @rate: current bus clock rate in Hz
*/
struct qcom_icc_node {
unsigned char *name;
@@ -77,11 +78,10 @@ struct qcom_icc_node {
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;
- u64 rate;
};
struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
+ struct qcom_icc_node * const *nodes;
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 2c8e12549804..3c40076eb5fb 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -189,7 +189,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes, *qn;
+ struct qcom_icc_node * const *qnodes, *qn;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i, j;
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 4bfc060529ba..d29929461c17 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -22,7 +22,7 @@
struct qcom_icc_provider {
struct icc_provider provider;
struct device *dev;
- struct qcom_icc_bcm **bcms;
+ struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
struct bcm_voter *voter;
};
@@ -112,9 +112,9 @@ struct qcom_icc_fabric {
};
struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
+ struct qcom_icc_node * const *nodes;
size_t num_nodes;
- struct qcom_icc_bcm **bcms;
+ struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
};
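The repeated "* const" conversions in the interconnect drivers turn the node and BCM tables into arrays of const pointers: the tables themselves can live in read-only data while the pointed-to nodes stay writable for runtime state. A tiny standalone illustration with made-up names:

struct node {
	int id;
};

static struct node a = { .id = 1 };

/* The array of pointers is const (eligible for .rodata);
 * the nodes themselves remain writable. */
static struct node * const nodes[] = { &a };

static void example_use(void)
{
	nodes[0]->id = 2;	/* fine: the pointee is not const */
	/* nodes[0] = &a;	   would not compile: the pointer is const */
}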
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index 2f397a7c3322..5c4ba2f37c8e 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -1191,7 +1191,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
.links = snoc_pcnoc_slv_links,
};
-static struct qcom_icc_node *msm8916_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -1228,7 +1228,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_snoc = {
+static const struct qcom_icc_desc msm8916_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
@@ -1236,7 +1236,7 @@ static struct qcom_icc_desc msm8916_snoc = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8916_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8916_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1256,7 +1256,7 @@ static const struct regmap_config msm8916_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_bimc = {
+static const struct qcom_icc_desc msm8916_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
@@ -1264,7 +1264,7 @@ static struct qcom_icc_desc msm8916_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -1325,7 +1325,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_pcnoc = {
+static const struct qcom_icc_desc msm8916_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index f9c2d7d3100d..63b31deea722 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1251,7 +1251,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
.links = snoc_pcnoc_slv_links,
};
-static struct qcom_icc_node *msm8939_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
@@ -1281,7 +1281,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_snoc = {
+static const struct qcom_icc_desc msm8939_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
@@ -1289,7 +1289,7 @@ static struct qcom_icc_desc msm8939_snoc = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_mm_nodes[] = {
[MASTER_VIDEO_P0] = &mas_video,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_VFE] = &mas_vfe,
@@ -1301,7 +1301,7 @@ static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
[SNOC_MM_INT_2] = &mm_int_2,
};
-static struct qcom_icc_desc msm8939_snoc_mm = {
+static const struct qcom_icc_desc msm8939_snoc_mm = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
@@ -1309,7 +1309,7 @@ static struct qcom_icc_desc msm8939_snoc_mm = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8939_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8939_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1329,7 +1329,7 @@ static const struct regmap_config msm8939_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_bimc = {
+static const struct qcom_icc_desc msm8939_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
@@ -1337,7 +1337,7 @@ static struct qcom_icc_desc msm8939_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -1400,7 +1400,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_pcnoc = {
+static const struct qcom_icc_desc msm8939_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index da68ce375a89..6fa0ad90fc3d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -220,7 +220,7 @@ struct msm8974_icc_node {
};
struct msm8974_icc_desc {
- struct msm8974_icc_node **nodes;
+ struct msm8974_icc_node * const *nodes;
size_t num_nodes;
};
@@ -244,7 +244,7 @@ DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC,
DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
-static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_bimc_nodes[] = {
[BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
[BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
[BIMC_MAS_MSS_PROC] = &mas_mss_proc,
@@ -254,7 +254,7 @@ static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
[BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
};
-static struct msm8974_icc_desc msm8974_bimc = {
+static const struct msm8974_icc_desc msm8974_bimc = {
.nodes = msm8974_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
};
@@ -297,7 +297,7 @@ DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
-static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_cnoc_nodes[] = {
[CNOC_MAS_RPM_INST] = &mas_rpm_inst,
[CNOC_MAS_RPM_DATA] = &mas_rpm_data,
[CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
@@ -337,7 +337,7 @@ static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
[CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
};
-static struct msm8974_icc_desc msm8974_cnoc = {
+static const struct msm8974_icc_desc msm8974_cnoc = {
.nodes = msm8974_cnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
};
@@ -365,7 +365,7 @@ DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
-static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_mnoc_nodes[] = {
[MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
[MNOC_MAS_JPEG] = &mas_jpeg,
[MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
@@ -390,7 +390,7 @@ static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
[MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
};
-static struct msm8974_icc_desc msm8974_mnoc = {
+static const struct msm8974_icc_desc msm8974_mnoc = {
.nodes = msm8974_mnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
};
@@ -410,7 +410,7 @@ DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MS
DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
-static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_onoc_nodes[] = {
[OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
[OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
[OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
@@ -425,7 +425,7 @@ static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
[OCMEM_SLV_OCMEM] = &slv_ocmem,
};
-static struct msm8974_icc_desc msm8974_onoc = {
+static const struct msm8974_icc_desc msm8974_onoc = {
.nodes = msm8974_onoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
};
@@ -458,7 +458,7 @@ DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
-static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_pnoc_nodes[] = {
[PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
[PNOC_MAS_SDCC_1] = &mas_sdcc_1,
[PNOC_MAS_SDCC_3] = &mas_sdcc_3,
@@ -488,7 +488,7 @@ static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
[PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
};
-static struct msm8974_icc_desc msm8974_pnoc = {
+static const struct msm8974_icc_desc msm8974_pnoc = {
.nodes = msm8974_pnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
};
@@ -518,7 +518,7 @@ DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
-static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_snoc_nodes[] = {
[SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
[SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
[SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
@@ -545,7 +545,7 @@ static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
[SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
};
-static struct msm8974_icc_desc msm8974_snoc = {
+static const struct msm8974_icc_desc msm8974_snoc = {
.nodes = msm8974_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
};
@@ -648,7 +648,7 @@ static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
static int msm8974_icc_probe(struct platform_device *pdev)
{
const struct msm8974_icc_desc *desc;
- struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_node * const *qnodes;
struct msm8974_icc_provider *qp;
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index 499e11fbbd2e..c2903ae3b3bc 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -1796,7 +1796,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
-static struct qcom_icc_node *a0noc_nodes[] = {
+static struct qcom_icc_node * const a0noc_nodes[] = {
[MASTER_PCIE_0] = &mas_pcie_0,
[MASTER_PCIE_1] = &mas_pcie_1,
[MASTER_PCIE_2] = &mas_pcie_2
@@ -1820,7 +1820,7 @@ static const struct qcom_icc_desc msm8996_a0noc = {
.regmap_cfg = &msm8996_a0noc_regmap_config
};
-static struct qcom_icc_node *a1noc_nodes[] = {
+static struct qcom_icc_node * const a1noc_nodes[] = {
[MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
[MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
[MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
@@ -1841,7 +1841,7 @@ static const struct qcom_icc_desc msm8996_a1noc = {
.regmap_cfg = &msm8996_a1noc_regmap_config
};
-static struct qcom_icc_node *a2noc_nodes[] = {
+static struct qcom_icc_node * const a2noc_nodes[] = {
[MASTER_USB3] = &mas_usb3,
[MASTER_IPA] = &mas_ipa,
[MASTER_UFS] = &mas_ufs
@@ -1862,7 +1862,7 @@ static const struct qcom_icc_desc msm8996_a2noc = {
.regmap_cfg = &msm8996_a2noc_regmap_config
};
-static struct qcom_icc_node *bimc_nodes[] = {
+static struct qcom_icc_node * const bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_GRAPHICS_3D] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1888,7 +1888,7 @@ static const struct qcom_icc_desc msm8996_bimc = {
.regmap_cfg = &msm8996_bimc_regmap_config
};
-static struct qcom_icc_node *cnoc_nodes[] = {
+static struct qcom_icc_node * const cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
@@ -1946,7 +1946,7 @@ static const struct qcom_icc_desc msm8996_cnoc = {
.regmap_cfg = &msm8996_cnoc_regmap_config
};
-static struct qcom_icc_node *mnoc_nodes[] = {
+static struct qcom_icc_node * const mnoc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
@@ -2001,7 +2001,7 @@ static const struct qcom_icc_desc msm8996_mnoc = {
.regmap_cfg = &msm8996_mnoc_regmap_config
};
-static struct qcom_icc_node *pnoc_nodes[] = {
+static struct qcom_icc_node * const pnoc_nodes[] = {
[MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
@@ -2037,7 +2037,7 @@ static const struct qcom_icc_desc msm8996_pnoc = {
.regmap_cfg = &msm8996_pnoc_regmap_config
};
-static struct qcom_icc_node *snoc_nodes[] = {
+static struct qcom_icc_node * const snoc_nodes[] = {
[MASTER_HMSS] = &mas_hmss,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index eec13099a6a3..4198656f4e59 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -67,7 +67,7 @@ struct qcom_osm_l3_node {
};
struct qcom_osm_l3_desc {
- const struct qcom_osm_l3_node **nodes;
+ const struct qcom_osm_l3_node * const *nodes;
size_t num_nodes;
unsigned int lut_row_size;
unsigned int reg_freq_lut;
@@ -86,7 +86,7 @@ struct qcom_osm_l3_desc {
DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
-static const struct qcom_osm_l3_node *sdm845_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sdm845_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
[SLAVE_OSM_L3] = &sdm845_osm_l3,
};
@@ -102,7 +102,7 @@ static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = {
DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
-static const struct qcom_osm_l3_node *sc7180_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7180_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
[SLAVE_OSM_L3] = &sc7180_osm_l3,
};
@@ -118,7 +118,7 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
-static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7280_epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
[SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
};
@@ -134,7 +134,7 @@ static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
-static const struct qcom_osm_l3_node *sc8180x_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc8180x_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3,
[SLAVE_OSM_L3] = &sc8180x_osm_l3,
};
@@ -150,7 +150,7 @@ static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = {
DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3);
DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32);
-static const struct qcom_osm_l3_node *sm8150_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8150_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3,
[SLAVE_OSM_L3] = &sm8150_osm_l3,
};
@@ -166,7 +166,7 @@ static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = {
DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3);
DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32);
-static const struct qcom_osm_l3_node *sm8250_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8250_epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3,
[SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3,
};
@@ -228,7 +228,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
const struct qcom_osm_l3_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- const struct qcom_osm_l3_node **qnodes;
+ const struct qcom_osm_l3_node * const *qnodes;
struct icc_node *node;
size_t num_nodes;
struct clk *clk;
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 74404e0b2080..0da612d6398c 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -1174,7 +1174,7 @@ static struct qcom_icc_node slv_anoc_snoc = {
};
/* NoC descriptors */
-static struct qcom_icc_node *qcm2290_bimc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_bimc_nodes[] = {
[MASTER_APPSS_PROC] = &mas_appss_proc,
[MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
[MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
@@ -1193,7 +1193,7 @@ static const struct regmap_config qcm2290_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_bimc = {
+static const struct qcom_icc_desc qcm2290_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = qcm2290_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
@@ -1202,7 +1202,7 @@ static struct qcom_icc_desc qcm2290_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *qcm2290_cnoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
@@ -1248,14 +1248,14 @@ static const struct regmap_config qcm2290_cnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_cnoc = {
+static const struct qcom_icc_desc qcm2290_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = qcm2290_cnoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
.regmap_cfg = &qcm2290_cnoc_regmap_config,
};
-static struct qcom_icc_node *qcm2290_snoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_snoc_nodes[] = {
[MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_TIC] = &mas_tic,
@@ -1289,7 +1289,7 @@ static const struct regmap_config qcm2290_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_snoc = {
+static const struct qcom_icc_desc qcm2290_snoc = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
@@ -1298,25 +1298,25 @@ static struct qcom_icc_desc qcm2290_snoc = {
.qos_offset = 0x15000,
};
-static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &mas_qup_core_0,
[SLAVE_QUP_CORE_0] = &slv_qup_core_0
};
-static struct qcom_icc_desc qcm2290_qup_virt = {
+static const struct qcom_icc_desc qcm2290_qup_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
};
-static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = {
[MASTER_CAMNOC_SF] = &mas_camnoc_sf,
[MASTER_VIDEO_P0] = &mas_video_p0,
[MASTER_VIDEO_PROC] = &mas_video_proc,
[SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
};
-static struct qcom_icc_desc qcm2290_mmnrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmnrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
@@ -1324,13 +1324,13 @@ static struct qcom_icc_desc qcm2290_mmnrt_virt = {
.qos_offset = 0x15000,
};
-static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = {
[MASTER_CAMNOC_HF] = &mas_camnoc_hf,
[MASTER_MDP0] = &mas_mdp0,
[SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
};
-static struct qcom_icc_desc qcm2290_mmrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 416c8bff8efa..fae155344332 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -974,7 +974,7 @@ static struct qcom_icc_node slv_lpass = {
.slv_rpm_id = -1,
};
-static struct qcom_icc_node *qcs404_bimc_nodes[] = {
+static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -984,12 +984,12 @@ static struct qcom_icc_node *qcs404_bimc_nodes[] = {
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
-static struct qcom_icc_desc qcs404_bimc = {
+static const struct qcom_icc_desc qcs404_bimc = {
.nodes = qcs404_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
};
-static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
[MASTER_SPDM] = &mas_spdm,
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_BLSP_2] = &mas_blsp_2,
@@ -1038,12 +1038,12 @@ static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
[SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
};
-static struct qcom_icc_desc qcs404_pcnoc = {
+static const struct qcom_icc_desc qcs404_pcnoc = {
.nodes = qcs404_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
};
-static struct qcom_icc_node *qcs404_snoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_BIMC_SNOC] = &mas_bimc_snoc,
[MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
@@ -1066,7 +1066,7 @@ static struct qcom_icc_node *qcs404_snoc_nodes[] = {
[SLAVE_LPASS] = &slv_lpass,
};
-static struct qcom_icc_desc qcs404_snoc = {
+static const struct qcom_icc_desc qcs404_snoc = {
.nodes = qcs404_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 5f7c0f85fa8e..35cd448efdfb 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -178,11 +178,11 @@ DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup_0,
@@ -193,18 +193,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sc7180_aggre1_noc = {
+static const struct qcom_icc_desc sc7180_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_1] = &qhm_qup_1,
@@ -216,56 +216,56 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sc7180_aggre2_noc = {
+static const struct qcom_icc_desc sc7180_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
-static struct qcom_icc_desc sc7180_camnoc_virt = {
+static const struct qcom_icc_desc sc7180_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
&bcm_co3,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[MASTER_NPU_PROC] = &qxm_npu_dsp,
[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
-static struct qcom_icc_desc sc7180_compute_noc = {
+static const struct qcom_icc_desc sc7180_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SNOC_CNOC] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -321,32 +321,32 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sc7180_config_noc = {
+static const struct qcom_icc_desc sc7180_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
-static struct qcom_icc_desc sc7180_dc_noc = {
+static const struct qcom_icc_desc sc7180_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_apps0,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
@@ -362,7 +362,7 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
-static struct qcom_icc_desc sc7180_gem_noc = {
+static const struct qcom_icc_desc sc7180_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
@@ -374,25 +374,25 @@ static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
-static struct qcom_icc_desc sc7180_mc_virt = {
+static const struct qcom_icc_desc sc7180_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -406,14 +406,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sc7180_mmss_noc = {
+static const struct qcom_icc_desc sc7180_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
@@ -427,30 +427,30 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
-static struct qcom_icc_desc sc7180_npu_noc = {
+static const struct qcom_icc_desc sc7180_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
};
-static struct qcom_icc_bcm *qup_virt_bcms[] = {
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
-static struct qcom_icc_node *qup_virt_nodes[] = {
+static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup_core_master_1,
[MASTER_QUP_CORE_1] = &qup_core_master_2,
[SLAVE_QUP_CORE_0] = &qup_core_slave_1,
[SLAVE_QUP_CORE_1] = &qup_core_slave_2,
};
-static struct qcom_icc_desc sc7180_qup_virt = {
+static const struct qcom_icc_desc sc7180_qup_virt = {
.nodes = qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qup_virt_nodes),
.bcms = qup_virt_bcms,
.num_bcms = ARRAY_SIZE(qup_virt_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -461,7 +461,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn12,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -478,7 +478,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sc7180_system_noc = {
+static const struct qcom_icc_desc sc7180_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index f8b34f6cbb0d..971f538bc98a 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -1476,13 +1476,13 @@ static struct qcom_icc_bcm bcm_sn14 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
@@ -1500,18 +1500,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sc7280_aggre1_noc = {
+static const struct qcom_icc_desc sc7280_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
@@ -1522,38 +1522,38 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sc7280_aggre2_noc = {
+static const struct qcom_icc_desc sc7280_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
};
-static struct qcom_icc_desc sc7280_clk_virt = {
+static const struct qcom_icc_desc sc7280_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *cnoc2_bcms[] = {
+static struct qcom_icc_bcm * const cnoc2_bcms[] = {
&bcm_cn1,
&bcm_cn2,
};
-static struct qcom_icc_node *cnoc2_nodes[] = {
+static struct qcom_icc_node * const cnoc2_nodes[] = {
[MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1603,21 +1603,21 @@ static struct qcom_icc_node *cnoc2_nodes[] = {
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
};
-static struct qcom_icc_desc sc7280_cnoc2 = {
+static const struct qcom_icc_desc sc7280_cnoc2 = {
.nodes = cnoc2_nodes,
.num_nodes = ARRAY_SIZE(cnoc2_nodes),
.bcms = cnoc2_bcms,
.num_bcms = ARRAY_SIZE(cnoc2_bcms),
};
-static struct qcom_icc_bcm *cnoc3_bcms[] = {
+static struct qcom_icc_bcm * const cnoc3_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_sn3,
&bcm_sn4,
};
-static struct qcom_icc_node *cnoc3_nodes[] = {
+static struct qcom_icc_node * const cnoc3_nodes[] = {
[MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3,
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
@@ -1635,37 +1635,37 @@ static struct qcom_icc_node *cnoc3_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sc7280_cnoc3 = {
+static const struct qcom_icc_desc sc7280_cnoc3 = {
.nodes = cnoc3_nodes,
.num_nodes = ARRAY_SIZE(cnoc3_nodes),
.bcms = cnoc3_bcms,
.num_bcms = ARRAY_SIZE(cnoc3_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
-static struct qcom_icc_desc sc7280_dc_noc = {
+static const struct qcom_icc_desc sc7280_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -1687,17 +1687,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
-static struct qcom_icc_desc sc7280_gem_noc = {
+static const struct qcom_icc_desc sc7280_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -1707,38 +1707,38 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sc7280_lpass_ag_noc = {
+static const struct qcom_icc_desc sc7280_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
-static struct qcom_icc_desc sc7280_mc_virt = {
+static const struct qcom_icc_desc sc7280_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_PROC] = &qnm_video_cpu,
@@ -1751,40 +1751,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sc7280_mmss_noc = {
+static const struct qcom_icc_desc sc7280_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sc7280_nsp_noc = {
+static const struct qcom_icc_desc sc7280_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -1795,7 +1795,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sc7280_system_noc = {
+static const struct qcom_icc_desc sc7280_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index e9adf05b9330..8e32ca958824 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -15,229 +15,1611 @@
#include "icc-rpmh.h"
#include "sc8180x.h"
-DEFINE_QNODE(mas_qhm_a1noc_cfg, SC8180X_MASTER_A1NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(mas_xm_ufs_card, SC8180X_MASTER_UFS_CARD, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_g4, SC8180X_MASTER_UFS_GEN4, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_mem, SC8180X_MASTER_UFS_MEM, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_0, SC8180X_MASTER_USB3, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_1, SC8180X_MASTER_USB3_1, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_2, SC8180X_MASTER_USB3_2, 1, 16, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_a2noc_cfg, SC8180X_MASTER_A2NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(mas_qhm_qdss_bam, SC8180X_MASTER_QDSS_BAM, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi, SC8180X_MASTER_QSPI_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi1, SC8180X_MASTER_QSPI_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup0, SC8180X_MASTER_QUP_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup1, SC8180X_MASTER_QUP_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup2, SC8180X_MASTER_QUP_2, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_sensorss_ahb, SC8180X_MASTER_SENSORS_AHB, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_crypto, SC8180X_MASTER_CRYPTO_CORE_0, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_ipa, SC8180X_MASTER_IPA, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_emac, SC8180X_MASTER_EMAC, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_pcie3_0, SC8180X_MASTER_PCIE, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_1, SC8180X_MASTER_PCIE_1, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_2, SC8180X_MASTER_PCIE_2, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_3, SC8180X_MASTER_PCIE_3, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_qdss_etr, SC8180X_MASTER_QDSS_ETR, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc2, SC8180X_MASTER_SDCC_2, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc4, SC8180X_MASTER_SDCC_4, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_camnoc_hf0_uncomp, SC8180X_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_hf1_uncomp, SC8180X_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_sf_uncomp, SC8180X_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qnm_npu, SC8180X_MASTER_NPU, 1, 32, SC8180X_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(mas_qnm_snoc, SC8180X_SNOC_CNOC_MAS, 1, 8, SC8180X_SLAVE_TLMM_SOUTH, SC8180X_SLAVE_CDSP_CFG, SC8180X_SLAVE_SPSS_CFG, SC8180X_SLAVE_CAMERA_CFG, SC8180X_SLAVE_SDCC_4, SC8180X_SLAVE_AHB2PHY_CENTER, SC8180X_SLAVE_SDCC_2, SC8180X_SLAVE_PCIE_2_CFG, SC8180X_SLAVE_CNOC_MNOC_CFG, SC8180X_SLAVE_EMAC_CFG, SC8180X_SLAVE_QSPI_0, SC8180X_SLAVE_QSPI_1, SC8180X_SLAVE_TLMM_EAST, SC8180X_SLAVE_SNOC_CFG, SC8180X_SLAVE_AHB2PHY_EAST, SC8180X_SLAVE_GLM, SC8180X_SLAVE_PDM, SC8180X_SLAVE_PCIE_1_CFG, SC8180X_SLAVE_A2NOC_CFG, SC8180X_SLAVE_QDSS_CFG, SC8180X_SLAVE_DISPLAY_CFG, SC8180X_SLAVE_TCSR, SC8180X_SLAVE_UFS_MEM_0_CFG, SC8180X_SLAVE_CNOC_DDRSS, SC8180X_SLAVE_PCIE_0_CFG, SC8180X_SLAVE_QUP_1, SC8180X_SLAVE_QUP_2, SC8180X_SLAVE_NPU_CFG, SC8180X_SLAVE_CRYPTO_0_CFG, SC8180X_SLAVE_GRAPHICS_3D_CFG, SC8180X_SLAVE_VENUS_CFG, SC8180X_SLAVE_TSIF, SC8180X_SLAVE_IPA_CFG, SC8180X_SLAVE_CLK_CTL, SC8180X_SLAVE_SECURITY, SC8180X_SLAVE_AOP, SC8180X_SLAVE_AHB2PHY_WEST, SC8180X_SLAVE_AHB2PHY_SOUTH, SC8180X_SLAVE_SERVICE_CNOC, SC8180X_SLAVE_UFS_CARD_CFG, SC8180X_SLAVE_USB3_1, SC8180X_SLAVE_USB3_2, SC8180X_SLAVE_PCIE_3_CFG, SC8180X_SLAVE_RBCPR_CX_CFG, SC8180X_SLAVE_TLMM_WEST, SC8180X_SLAVE_A1NOC_CFG, SC8180X_SLAVE_AOSS, SC8180X_SLAVE_PRNG, SC8180X_SLAVE_VSENSE_CTRL_CFG, SC8180X_SLAVE_QUP_0, SC8180X_SLAVE_USB3, SC8180X_SLAVE_RBCPR_MMCX_CFG, SC8180X_SLAVE_PIMEM_CFG, SC8180X_SLAVE_UFS_MEM_1_CFG, SC8180X_SLAVE_RBCPR_MX_CFG, SC8180X_SLAVE_IMEM_CFG);
-DEFINE_QNODE(mas_qhm_cnoc_dc_noc, SC8180X_MASTER_CNOC_DC_NOC, 1, 4, SC8180X_SLAVE_LLCC_CFG, SC8180X_SLAVE_GEM_NOC_CFG);
-DEFINE_QNODE(mas_acm_apps, SC8180X_MASTER_AMPSS_M0, 4, 64, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_gpu_tcu, SC8180X_MASTER_GPU_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_sys_tcu, SC8180X_MASTER_SYS_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qhm_gemnoc_cfg, SC8180X_MASTER_GEM_NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_GEM_NOC_1, SC8180X_SLAVE_SERVICE_GEM_NOC, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(mas_qnm_cmpnoc, SC8180X_MASTER_COMPUTE_NOC, 2, 32, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_gpu, SC8180X_MASTER_GRAPHICS_3D, 4, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_mnoc_hf, SC8180X_MASTER_MNOC_HF_MEM_NOC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_mnoc_sf, SC8180X_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_pcie, SC8180X_MASTER_GEM_NOC_PCIE_SNOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_snoc_gc, SC8180X_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_snoc_sf, SC8180X_MASTER_SNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qxm_ecc, SC8180X_MASTER_ECC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_ipa_core_master, SC8180X_MASTER_IPA_CORE, 1, 8, SC8180X_SLAVE_IPA_CORE);
-DEFINE_QNODE(mas_llcc_mc, SC8180X_MASTER_LLCC, 8, 4, SC8180X_SLAVE_EBI_CH0);
-DEFINE_QNODE(mas_qhm_mnoc_cfg, SC8180X_MASTER_CNOC_MNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf0, SC8180X_MASTER_CAMNOC_HF0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf1, SC8180X_MASTER_CAMNOC_HF1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_sf, SC8180X_MASTER_CAMNOC_SF, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp0, SC8180X_MASTER_MDP_PORT0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp1, SC8180X_MASTER_MDP_PORT1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_rot, SC8180X_MASTER_ROTATOR, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus0, SC8180X_MASTER_VIDEO_P0, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus1, SC8180X_MASTER_VIDEO_P1, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus_arm9, SC8180X_MASTER_VIDEO_PROC, 1, 8, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qhm_snoc_cfg, SC8180X_MASTER_SNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(mas_qnm_aggre1_noc, SC8180X_A1NOC_SNOC_MAS, 1, 32, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_PCIE_3, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SLAVE_PCIE_2, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_PCIE_0, SC8180X_SLAVE_PCIE_1, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS);
-DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS);
-DEFINE_QNODE(slv_qns_pcie_mem_noc, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, 1, 32, SC8180X_MASTER_GEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(slv_srvc_aggre2_noc, SC8180X_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(slv_qns_camnoc_uncomp, SC8180X_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(slv_qns_cdsp_mem_noc, SC8180X_SLAVE_CDSP_MEM_NOC, 2, 32, SC8180X_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(slv_qhs_a1_noc_cfg, SC8180X_SLAVE_A1NOC_CFG, 1, 4, SC8180X_MASTER_A1NOC_CFG);
-DEFINE_QNODE(slv_qhs_a2_noc_cfg, SC8180X_SLAVE_A2NOC_CFG, 1, 4, SC8180X_MASTER_A2NOC_CFG);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_center, SC8180X_SLAVE_AHB2PHY_CENTER, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_east, SC8180X_SLAVE_AHB2PHY_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_west, SC8180X_SLAVE_AHB2PHY_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_south, SC8180X_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_aop, SC8180X_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(slv_qhs_aoss, SC8180X_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(slv_qhs_camera_cfg, SC8180X_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_clk_ctl, SC8180X_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(slv_qhs_compute_dsp, SC8180X_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_cx, SC8180X_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mmcx, SC8180X_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mx, SC8180X_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_crypto0_cfg, SC8180X_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ddrss_cfg, SC8180X_SLAVE_CNOC_DDRSS, 1, 4, SC8180X_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(slv_qhs_display_cfg, SC8180X_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_emac_cfg, SC8180X_SLAVE_EMAC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_glm, SC8180X_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(slv_qhs_gpuss_cfg, SC8180X_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(slv_qhs_imem_cfg, SC8180X_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ipa, SC8180X_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mnoc_cfg, SC8180X_SLAVE_CNOC_MNOC_CFG, 1, 4, SC8180X_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(slv_qhs_npu_cfg, SC8180X_SLAVE_NPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie0_cfg, SC8180X_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie1_cfg, SC8180X_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie2_cfg, SC8180X_SLAVE_PCIE_2_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie3_cfg, SC8180X_SLAVE_PCIE_3_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pdm, SC8180X_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(slv_qhs_pimem_cfg, SC8180X_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_prng, SC8180X_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(slv_qhs_qdss_cfg, SC8180X_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_0, SC8180X_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_1, SC8180X_SLAVE_QSPI_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east0, SC8180X_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east1, SC8180X_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_west, SC8180X_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc2, SC8180X_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc4, SC8180X_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(slv_qhs_security, SC8180X_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(slv_qhs_snoc_cfg, SC8180X_SLAVE_SNOC_CFG, 1, 4, SC8180X_MASTER_SNOC_CFG);
-DEFINE_QNODE(slv_qhs_spss_cfg, SC8180X_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_tcsr, SC8180X_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_east, SC8180X_SLAVE_TLMM_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_south, SC8180X_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_west, SC8180X_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_tsif, SC8180X_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_card_cfg, SC8180X_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem0_cfg, SC8180X_SLAVE_UFS_MEM_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem1_cfg, SC8180X_SLAVE_UFS_MEM_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_0, SC8180X_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_1, SC8180X_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_2, SC8180X_SLAVE_USB3_2, 1, 4);
-DEFINE_QNODE(slv_qhs_venus_cfg, SC8180X_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_vsense_ctrl_cfg, SC8180X_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(slv_srvc_cnoc, SC8180X_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_gemnoc, SC8180X_SLAVE_GEM_NOC_CFG, 1, 4, SC8180X_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(slv_qhs_llcc, SC8180X_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mdsp_ms_mpu_cfg, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qns_ecc, SC8180X_SLAVE_ECC, 1, 32);
-DEFINE_QNODE(slv_qns_gem_noc_snoc, SC8180X_SLAVE_GEM_NOC_SNOC, 1, 8, SC8180X_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(slv_qns_llcc, SC8180X_SLAVE_LLCC, 8, 16, SC8180X_MASTER_LLCC);
-DEFINE_QNODE(slv_srvc_gemnoc, SC8180X_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(slv_srvc_gemnoc1, SC8180X_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
-DEFINE_QNODE(slv_ipa_core_slave, SC8180X_SLAVE_IPA_CORE, 1, 8);
-DEFINE_QNODE(slv_ebi, SC8180X_SLAVE_EBI_CH0, 8, 4);
-DEFINE_QNODE(slv_qns2_mem_noc, SC8180X_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC8180X_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qns_mem_noc_hf, SC8180X_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SC8180X_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(slv_srvc_mnoc, SC8180X_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_apss, SC8180X_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(slv_qns_cnoc, SC8180X_SNOC_CNOC_SLV, 1, 8, SC8180X_SNOC_CNOC_MAS);
-DEFINE_QNODE(slv_qns_gemnoc_gc, SC8180X_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC8180X_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(slv_qns_gemnoc_sf, SC8180X_SLAVE_SNOC_GEM_NOC_SF, 1, 32, SC8180X_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qxs_imem, SC8180X_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(slv_qxs_pimem, SC8180X_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(slv_srvc_snoc, SC8180X_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(slv_xs_pcie_0, SC8180X_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_1, SC8180X_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8);
-DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", false, &slv_ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", false, &slv_qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, &slv_qns_mem_noc_hf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, &slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", false, &slv_qns_gemnoc_sf);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &slv_qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &slv_qns_gemnoc_gc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &mas_qnm_npu);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &slv_ipa_core_slave);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &slv_srvc_aggre1_noc, &slv_qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &slv_qxs_pimem);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &slv_xs_pcie_0, &slv_xs_pcie_1, &slv_xs_pcie_2, &slv_xs_pcie_3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &mas_qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &mas_qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &slv_qns_pcie_mem_noc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc);
-
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_node mas_qhm_a1noc_cfg = {
+ .name = "mas_qhm_a1noc_cfg",
+ .id = SC8180X_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_A1NOC }
+};
+
+static struct qcom_icc_node mas_xm_ufs_card = {
+ .name = "mas_xm_ufs_card",
+ .id = SC8180X_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_g4 = {
+ .name = "mas_xm_ufs_g4",
+ .id = SC8180X_MASTER_UFS_GEN4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_mem = {
+ .name = "mas_xm_ufs_mem",
+ .id = SC8180X_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_0 = {
+ .name = "mas_xm_usb3_0",
+ .id = SC8180X_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_1 = {
+ .name = "mas_xm_usb3_1",
+ .id = SC8180X_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_2 = {
+ .name = "mas_xm_usb3_2",
+ .id = SC8180X_MASTER_USB3_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_a2noc_cfg = {
+ .name = "mas_qhm_a2noc_cfg",
+ .id = SC8180X_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_A2NOC }
+};
+
+static struct qcom_icc_node mas_qhm_qdss_bam = {
+ .name = "mas_qhm_qdss_bam",
+ .id = SC8180X_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi = {
+ .name = "mas_qhm_qspi",
+ .id = SC8180X_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi1 = {
+ .name = "mas_qhm_qspi1",
+ .id = SC8180X_MASTER_QSPI_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup0 = {
+ .name = "mas_qhm_qup0",
+ .id = SC8180X_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup1 = {
+ .name = "mas_qhm_qup1",
+ .id = SC8180X_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup2 = {
+ .name = "mas_qhm_qup2",
+ .id = SC8180X_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_sensorss_ahb = {
+ .name = "mas_qhm_sensorss_ahb",
+ .id = SC8180X_MASTER_SENSORS_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_crypto = {
+ .name = "mas_qxm_crypto",
+ .id = SC8180X_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_ipa = {
+ .name = "mas_qxm_ipa",
+ .id = SC8180X_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_emac = {
+ .name = "mas_xm_emac",
+ .id = SC8180X_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_0 = {
+ .name = "mas_xm_pcie3_0",
+ .id = SC8180X_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_1 = {
+ .name = "mas_xm_pcie3_1",
+ .id = SC8180X_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_2 = {
+ .name = "mas_xm_pcie3_2",
+ .id = SC8180X_MASTER_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_3 = {
+ .name = "mas_xm_pcie3_3",
+ .id = SC8180X_MASTER_PCIE_3,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_qdss_etr = {
+ .name = "mas_xm_qdss_etr",
+ .id = SC8180X_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc2 = {
+ .name = "mas_xm_sdc2",
+ .id = SC8180X_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc4 = {
+ .name = "mas_xm_sdc4",
+ .id = SC8180X_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = {
+ .name = "mas_qxm_camnoc_hf0_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = {
+ .name = "mas_qxm_camnoc_hf1_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = {
+ .name = "mas_qxm_camnoc_sf_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qnm_npu = {
+ .name = "mas_qnm_npu",
+ .id = SC8180X_MASTER_NPU,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CDSP_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc = {
+ .name = "mas_qnm_snoc",
+ .id = SC8180X_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 56,
+ .links = { SC8180X_SLAVE_TLMM_SOUTH,
+ SC8180X_SLAVE_CDSP_CFG,
+ SC8180X_SLAVE_SPSS_CFG,
+ SC8180X_SLAVE_CAMERA_CFG,
+ SC8180X_SLAVE_SDCC_4,
+ SC8180X_SLAVE_AHB2PHY_CENTER,
+ SC8180X_SLAVE_SDCC_2,
+ SC8180X_SLAVE_PCIE_2_CFG,
+ SC8180X_SLAVE_CNOC_MNOC_CFG,
+ SC8180X_SLAVE_EMAC_CFG,
+ SC8180X_SLAVE_QSPI_0,
+ SC8180X_SLAVE_QSPI_1,
+ SC8180X_SLAVE_TLMM_EAST,
+ SC8180X_SLAVE_SNOC_CFG,
+ SC8180X_SLAVE_AHB2PHY_EAST,
+ SC8180X_SLAVE_GLM,
+ SC8180X_SLAVE_PDM,
+ SC8180X_SLAVE_PCIE_1_CFG,
+ SC8180X_SLAVE_A2NOC_CFG,
+ SC8180X_SLAVE_QDSS_CFG,
+ SC8180X_SLAVE_DISPLAY_CFG,
+ SC8180X_SLAVE_TCSR,
+ SC8180X_SLAVE_UFS_MEM_0_CFG,
+ SC8180X_SLAVE_CNOC_DDRSS,
+ SC8180X_SLAVE_PCIE_0_CFG,
+ SC8180X_SLAVE_QUP_1,
+ SC8180X_SLAVE_QUP_2,
+ SC8180X_SLAVE_NPU_CFG,
+ SC8180X_SLAVE_CRYPTO_0_CFG,
+ SC8180X_SLAVE_GRAPHICS_3D_CFG,
+ SC8180X_SLAVE_VENUS_CFG,
+ SC8180X_SLAVE_TSIF,
+ SC8180X_SLAVE_IPA_CFG,
+ SC8180X_SLAVE_CLK_CTL,
+ SC8180X_SLAVE_SECURITY,
+ SC8180X_SLAVE_AOP,
+ SC8180X_SLAVE_AHB2PHY_WEST,
+ SC8180X_SLAVE_AHB2PHY_SOUTH,
+ SC8180X_SLAVE_SERVICE_CNOC,
+ SC8180X_SLAVE_UFS_CARD_CFG,
+ SC8180X_SLAVE_USB3_1,
+ SC8180X_SLAVE_USB3_2,
+ SC8180X_SLAVE_PCIE_3_CFG,
+ SC8180X_SLAVE_RBCPR_CX_CFG,
+ SC8180X_SLAVE_TLMM_WEST,
+ SC8180X_SLAVE_A1NOC_CFG,
+ SC8180X_SLAVE_AOSS,
+ SC8180X_SLAVE_PRNG,
+ SC8180X_SLAVE_VSENSE_CTRL_CFG,
+ SC8180X_SLAVE_QUP_0,
+ SC8180X_SLAVE_USB3,
+ SC8180X_SLAVE_RBCPR_MMCX_CFG,
+ SC8180X_SLAVE_PIMEM_CFG,
+ SC8180X_SLAVE_UFS_MEM_1_CFG,
+ SC8180X_SLAVE_RBCPR_MX_CFG,
+ SC8180X_SLAVE_IMEM_CFG }
+};
+
+static struct qcom_icc_node mas_qhm_cnoc_dc_noc = {
+ .name = "mas_qhm_cnoc_dc_noc",
+ .id = SC8180X_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC_CFG,
+ SC8180X_SLAVE_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node mas_acm_apps = {
+ .name = "mas_acm_apps",
+ .id = SC8180X_MASTER_AMPSS_M0,
+ .channels = 4,
+ .buswidth = 64,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_ECC,
+ SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_gpu_tcu = {
+ .name = "mas_acm_gpu_tcu",
+ .id = SC8180X_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_sys_tcu = {
+ .name = "mas_acm_sys_tcu",
+ .id = SC8180X_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qhm_gemnoc_cfg = {
+ .name = "mas_qhm_gemnoc_cfg",
+ .id = SC8180X_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+ SC8180X_SLAVE_SERVICE_GEM_NOC,
+ SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG }
+};
+
+static struct qcom_icc_node mas_qnm_cmpnoc = {
+ .name = "mas_qnm_cmpnoc",
+ .id = SC8180X_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_ECC,
+ SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_gpu = {
+ .name = "mas_qnm_gpu",
+ .id = SC8180X_MASTER_GRAPHICS_3D,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_hf = {
+ .name = "mas_qnm_mnoc_hf",
+ .id = SC8180X_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_sf = {
+ .name = "mas_qnm_mnoc_sf",
+ .id = SC8180X_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_pcie = {
+ .name = "mas_qnm_pcie",
+ .id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_gc = {
+ .name = "mas_qnm_snoc_gc",
+ .id = SC8180X_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_sf = {
+ .name = "mas_qnm_snoc_sf",
+ .id = SC8180X_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qxm_ecc = {
+ .name = "mas_qxm_ecc",
+ .id = SC8180X_MASTER_ECC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_ipa_core_master = {
+ .name = "mas_ipa_core_master",
+ .id = SC8180X_MASTER_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_IPA_CORE }
+};
+
+static struct qcom_icc_node mas_llcc_mc = {
+ .name = "mas_llcc_mc",
+ .id = SC8180X_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_EBI_CH0 }
+};
+
+static struct qcom_icc_node mas_qhm_mnoc_cfg = {
+ .name = "mas_qhm_mnoc_cfg",
+ .id = SC8180X_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_MNOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0 = {
+ .name = "mas_qxm_camnoc_hf0",
+ .id = SC8180X_MASTER_CAMNOC_HF0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1 = {
+ .name = "mas_qxm_camnoc_hf1",
+ .id = SC8180X_MASTER_CAMNOC_HF1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf = {
+ .name = "mas_qxm_camnoc_sf",
+ .id = SC8180X_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp0 = {
+ .name = "mas_qxm_mdp0",
+ .id = SC8180X_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp1 = {
+ .name = "mas_qxm_mdp1",
+ .id = SC8180X_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_rot = {
+ .name = "mas_qxm_rot",
+ .id = SC8180X_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus0 = {
+ .name = "mas_qxm_venus0",
+ .id = SC8180X_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus1 = {
+ .name = "mas_qxm_venus1",
+ .id = SC8180X_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus_arm9 = {
+ .name = "mas_qxm_venus_arm9",
+ .id = SC8180X_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qhm_snoc_cfg = {
+ .name = "mas_qhm_snoc_cfg",
+ .id = SC8180X_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_aggre1_noc = {
+ .name = "mas_qnm_aggre1_noc",
+ .id = SC8180X_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 6,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_aggre2_noc = {
+ .name = "mas_qnm_aggre2_noc",
+ .id = SC8180X_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 11,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_PCIE_3,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SLAVE_PCIE_2,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_PCIE_0,
+ SC8180X_SLAVE_PCIE_1,
+ SC8180X_SLAVE_TCU,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_gemnoc = {
+ .name = "mas_qnm_gemnoc",
+ .id = SC8180X_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_TCU,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qxm_pimem = {
+ .name = "mas_qxm_pimem",
+ .id = SC8180X_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_xm_gic = {
+ .name = "mas_xm_gic",
+ .id = SC8180X_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_qup_core_0 = {
+ .name = "mas_qup_core_0",
+ .id = SC8180X_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_0 }
+};
+
+static struct qcom_icc_node mas_qup_core_1 = {
+ .name = "mas_qup_core_1",
+ .id = SC8180X_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_1 }
+};
+
+static struct qcom_icc_node mas_qup_core_2 = {
+ .name = "mas_qup_core_2",
+ .id = SC8180X_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_2 }
+};
+
+static struct qcom_icc_node slv_qns_a1noc_snoc = {
+ .name = "slv_qns_a1noc_snoc",
+ .id = SC8180X_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_srvc_aggre1_noc = {
+ .name = "slv_srvc_aggre1_noc",
+ .id = SC8180X_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_a2noc_snoc = {
+ .name = "slv_qns_a2noc_snoc",
+ .id = SC8180X_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_pcie_mem_noc = {
+ .name = "slv_qns_pcie_mem_noc",
+ .id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC }
+};
+
+static struct qcom_icc_node slv_srvc_aggre2_noc = {
+ .name = "slv_srvc_aggre2_noc",
+ .id = SC8180X_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_camnoc_uncomp = {
+ .name = "slv_qns_camnoc_uncomp",
+ .id = SC8180X_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_cdsp_mem_noc = {
+ .name = "slv_qns_cdsp_mem_noc",
+ .id = SC8180X_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_COMPUTE_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_a1_noc_cfg = {
+ .name = "slv_qhs_a1_noc_cfg",
+ .id = SC8180X_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_A1NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_a2_noc_cfg = {
+ .name = "slv_qhs_a2_noc_cfg",
+ .id = SC8180X_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_A2NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = {
+ .name = "slv_qhs_ahb2phy_refgen_center",
+ .id = SC8180X_SLAVE_AHB2PHY_CENTER,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = {
+ .name = "slv_qhs_ahb2phy_refgen_east",
+ .id = SC8180X_SLAVE_AHB2PHY_EAST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = {
+ .name = "slv_qhs_ahb2phy_refgen_west",
+ .id = SC8180X_SLAVE_AHB2PHY_WEST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_south = {
+ .name = "slv_qhs_ahb2phy_south",
+ .id = SC8180X_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aop = {
+ .name = "slv_qhs_aop",
+ .id = SC8180X_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aoss = {
+ .name = "slv_qhs_aoss",
+ .id = SC8180X_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_camera_cfg = {
+ .name = "slv_qhs_camera_cfg",
+ .id = SC8180X_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_clk_ctl = {
+ .name = "slv_qhs_clk_ctl",
+ .id = SC8180X_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_compute_dsp = {
+ .name = "slv_qhs_compute_dsp",
+ .id = SC8180X_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_cx = {
+ .name = "slv_qhs_cpr_cx",
+ .id = SC8180X_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mmcx = {
+ .name = "slv_qhs_cpr_mmcx",
+ .id = SC8180X_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mx = {
+ .name = "slv_qhs_cpr_mx",
+ .id = SC8180X_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_crypto0_cfg = {
+ .name = "slv_qhs_crypto0_cfg",
+ .id = SC8180X_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ddrss_cfg = {
+ .name = "slv_qhs_ddrss_cfg",
+ .id = SC8180X_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_CNOC_DC_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_display_cfg = {
+ .name = "slv_qhs_display_cfg",
+ .id = SC8180X_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_emac_cfg = {
+ .name = "slv_qhs_emac_cfg",
+ .id = SC8180X_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_glm = {
+ .name = "slv_qhs_glm",
+ .id = SC8180X_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gpuss_cfg = {
+ .name = "slv_qhs_gpuss_cfg",
+ .id = SC8180X_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qhs_imem_cfg = {
+ .name = "slv_qhs_imem_cfg",
+ .id = SC8180X_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ipa = {
+ .name = "slv_qhs_ipa",
+ .id = SC8180X_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mnoc_cfg = {
+ .name = "slv_qhs_mnoc_cfg",
+ .id = SC8180X_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_CNOC_MNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_npu_cfg = {
+ .name = "slv_qhs_npu_cfg",
+ .id = SC8180X_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie0_cfg = {
+ .name = "slv_qhs_pcie0_cfg",
+ .id = SC8180X_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie1_cfg = {
+ .name = "slv_qhs_pcie1_cfg",
+ .id = SC8180X_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie2_cfg = {
+ .name = "slv_qhs_pcie2_cfg",
+ .id = SC8180X_SLAVE_PCIE_2_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie3_cfg = {
+ .name = "slv_qhs_pcie3_cfg",
+ .id = SC8180X_SLAVE_PCIE_3_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pdm = {
+ .name = "slv_qhs_pdm",
+ .id = SC8180X_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pimem_cfg = {
+ .name = "slv_qhs_pimem_cfg",
+ .id = SC8180X_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_prng = {
+ .name = "slv_qhs_prng",
+ .id = SC8180X_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qdss_cfg = {
+ .name = "slv_qhs_qdss_cfg",
+ .id = SC8180X_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_0 = {
+ .name = "slv_qhs_qspi_0",
+ .id = SC8180X_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_1 = {
+ .name = "slv_qhs_qspi_1",
+ .id = SC8180X_SLAVE_QSPI_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east0 = {
+ .name = "slv_qhs_qupv3_east0",
+ .id = SC8180X_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east1 = {
+ .name = "slv_qhs_qupv3_east1",
+ .id = SC8180X_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_west = {
+ .name = "slv_qhs_qupv3_west",
+ .id = SC8180X_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc2 = {
+ .name = "slv_qhs_sdc2",
+ .id = SC8180X_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc4 = {
+ .name = "slv_qhs_sdc4",
+ .id = SC8180X_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_security = {
+ .name = "slv_qhs_security",
+ .id = SC8180X_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_snoc_cfg = {
+ .name = "slv_qhs_snoc_cfg",
+ .id = SC8180X_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_spss_cfg = {
+ .name = "slv_qhs_spss_cfg",
+ .id = SC8180X_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tcsr = {
+ .name = "slv_qhs_tcsr",
+ .id = SC8180X_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_east = {
+ .name = "slv_qhs_tlmm_east",
+ .id = SC8180X_SLAVE_TLMM_EAST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_south = {
+ .name = "slv_qhs_tlmm_south",
+ .id = SC8180X_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_west = {
+ .name = "slv_qhs_tlmm_west",
+ .id = SC8180X_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tsif = {
+ .name = "slv_qhs_tsif",
+ .id = SC8180X_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_card_cfg = {
+ .name = "slv_qhs_ufs_card_cfg",
+ .id = SC8180X_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = {
+ .name = "slv_qhs_ufs_mem0_cfg",
+ .id = SC8180X_SLAVE_UFS_MEM_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = {
+ .name = "slv_qhs_ufs_mem1_cfg",
+ .id = SC8180X_SLAVE_UFS_MEM_1_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_0 = {
+ .name = "slv_qhs_usb3_0",
+ .id = SC8180X_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_1 = {
+ .name = "slv_qhs_usb3_1",
+ .id = SC8180X_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_2 = {
+ .name = "slv_qhs_usb3_2",
+ .id = SC8180X_SLAVE_USB3_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_venus_cfg = {
+ .name = "slv_qhs_venus_cfg",
+ .id = SC8180X_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = {
+ .name = "slv_qhs_vsense_ctrl_cfg",
+ .id = SC8180X_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_cnoc = {
+ .name = "slv_srvc_cnoc",
+ .id = SC8180X_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gemnoc = {
+ .name = "slv_qhs_gemnoc",
+ .id = SC8180X_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_llcc = {
+ .name = "slv_qhs_llcc",
+ .id = SC8180X_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = {
+ .name = "slv_qhs_mdsp_ms_mpu_cfg",
+ .id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_ecc = {
+ .name = "slv_qns_ecc",
+ .id = SC8180X_SLAVE_ECC,
+ .channels = 1,
+ .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_gem_noc_snoc = {
+ .name = "slv_qns_gem_noc_snoc",
+ .id = SC8180X_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node slv_qns_llcc = {
+ .name = "slv_qns_llcc",
+ .id = SC8180X_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_LLCC }
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc = {
+ .name = "slv_srvc_gemnoc",
+ .id = SC8180X_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc1 = {
+ .name = "slv_srvc_gemnoc1",
+ .id = SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_ipa_core_slave = {
+ .name = "slv_ipa_core_slave",
+ .id = SC8180X_SLAVE_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = SC8180X_SLAVE_EBI_CH0,
+ .channels = 8,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns2_mem_noc = {
+ .name = "slv_qns2_mem_noc",
+ .id = SC8180X_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_mem_noc_hf = {
+ .name = "slv_qns_mem_noc_hf",
+ .id = SC8180X_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+ .name = "slv_srvc_mnoc",
+ .id = SC8180X_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_apss = {
+ .name = "slv_qhs_apss",
+ .id = SC8180X_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qns_cnoc = {
+ .name = "slv_qns_cnoc",
+ .id = SC8180X_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SNOC_CNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_gc = {
+ .name = "slv_qns_gemnoc_gc",
+ .id = SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_GC_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_sf = {
+ .name = "slv_qns_gemnoc_sf",
+ .id = SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qxs_imem = {
+ .name = "slv_qxs_imem",
+ .id = SC8180X_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qxs_pimem = {
+ .name = "slv_qxs_pimem",
+ .id = SC8180X_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = SC8180X_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_pcie_0 = {
+ .name = "slv_xs_pcie_0",
+ .id = SC8180X_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_1 = {
+ .name = "slv_xs_pcie_1",
+ .id = SC8180X_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_2 = {
+ .name = "slv_xs_pcie_2",
+ .id = SC8180X_SLAVE_PCIE_2,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_3 = {
+ .name = "slv_xs_pcie_3",
+ .id = SC8180X_SLAVE_PCIE_3,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_qdss_stm = {
+ .name = "slv_xs_qdss_stm",
+ .id = SC8180X_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_sys_tcu_cfg = {
+ .name = "slv_xs_sys_tcu_cfg",
+ .id = SC8180X_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qup_core_0 = {
+ .name = "slv_qup_core_0",
+ .id = SC8180X_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_1 = {
+ .name = "slv_qup_core_1",
+ .id = SC8180X_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_2 = {
+ .name = "slv_qup_core_2",
+ .id = SC8180X_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &slv_qns_llcc }
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_mem_noc_hf }
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_cdsp_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &mas_qxm_crypto }
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 57,
+ .nodes = { &mas_qnm_snoc,
+ &slv_qhs_a1_noc_cfg,
+ &slv_qhs_a2_noc_cfg,
+ &slv_qhs_ahb2phy_refgen_center,
+ &slv_qhs_ahb2phy_refgen_east,
+ &slv_qhs_ahb2phy_refgen_west,
+ &slv_qhs_ahb2phy_south,
+ &slv_qhs_aop,
+ &slv_qhs_aoss,
+ &slv_qhs_camera_cfg,
+ &slv_qhs_clk_ctl,
+ &slv_qhs_compute_dsp,
+ &slv_qhs_cpr_cx,
+ &slv_qhs_cpr_mmcx,
+ &slv_qhs_cpr_mx,
+ &slv_qhs_crypto0_cfg,
+ &slv_qhs_ddrss_cfg,
+ &slv_qhs_display_cfg,
+ &slv_qhs_emac_cfg,
+ &slv_qhs_glm,
+ &slv_qhs_gpuss_cfg,
+ &slv_qhs_imem_cfg,
+ &slv_qhs_ipa,
+ &slv_qhs_mnoc_cfg,
+ &slv_qhs_npu_cfg,
+ &slv_qhs_pcie0_cfg,
+ &slv_qhs_pcie1_cfg,
+ &slv_qhs_pcie2_cfg,
+ &slv_qhs_pcie3_cfg,
+ &slv_qhs_pdm,
+ &slv_qhs_pimem_cfg,
+ &slv_qhs_prng,
+ &slv_qhs_qdss_cfg,
+ &slv_qhs_qspi_0,
+ &slv_qhs_qspi_1,
+ &slv_qhs_qupv3_east0,
+ &slv_qhs_qupv3_east1,
+ &slv_qhs_qupv3_west,
+ &slv_qhs_sdc2,
+ &slv_qhs_sdc4,
+ &slv_qhs_security,
+ &slv_qhs_snoc_cfg,
+ &slv_qhs_spss_cfg,
+ &slv_qhs_tcsr,
+ &slv_qhs_tlmm_east,
+ &slv_qhs_tlmm_south,
+ &slv_qhs_tlmm_west,
+ &slv_qhs_tsif,
+ &slv_qhs_ufs_card_cfg,
+ &slv_qhs_ufs_mem0_cfg,
+ &slv_qhs_ufs_mem1_cfg,
+ &slv_qhs_usb3_0,
+ &slv_qhs_usb3_1,
+ &slv_qhs_usb3_2,
+ &slv_qhs_venus_cfg,
+ &slv_qhs_vsense_ctrl_cfg,
+ &slv_srvc_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 7,
+ .nodes = { &mas_qxm_camnoc_hf0_uncomp,
+ &mas_qxm_camnoc_hf1_uncomp,
+ &mas_qxm_camnoc_sf_uncomp,
+ &mas_qxm_camnoc_hf0,
+ &mas_qxm_camnoc_hf1,
+ &mas_qxm_mdp0,
+ &mas_qxm_mdp1 }
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .num_nodes = 3,
+ .nodes = { &mas_qup_core_0,
+ &mas_qup_core_1,
+ &mas_qup_core_2 }
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_gem_noc_snoc }
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .num_nodes = 6,
+ .nodes = { &mas_qxm_camnoc_sf,
+ &mas_qxm_rot,
+ &mas_qxm_venus0,
+ &mas_qxm_venus1,
+ &mas_qxm_venus_arm9,
+ &slv_qns2_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &mas_acm_apps }
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .nodes = { &slv_qns_gemnoc_sf }
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .nodes = { &slv_qxs_imem }
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = true,
+ .nodes = { &slv_qns_gemnoc_gc }
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+ .name = "CO2",
+ .nodes = { &mas_qnm_npu }
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+ .name = "IP0",
+ .nodes = { &slv_ipa_core_slave }
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = true,
+ .num_nodes = 2,
+ .nodes = { &slv_srvc_aggre1_noc,
+ &slv_qns_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .nodes = { &slv_qxs_pimem }
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .num_nodes = 4,
+ .nodes = { &slv_xs_pcie_0,
+ &slv_xs_pcie_1,
+ &slv_xs_pcie_2,
+ &slv_xs_pcie_3 }
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_aggre1_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_aggre2_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_pcie_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_gemnoc }
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
&bcm_ce0,
- &bcm_qup0,
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_sn14,
&bcm_ce0,
- &bcm_qup0,
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
&bcm_acv,
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -249,7 +1631,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg,
[MASTER_UFS_CARD] = &mas_xm_ufs_card,
[MASTER_UFS_GEN4] = &mas_xm_ufs_g4,
@@ -261,7 +1643,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &mas_qhm_qdss_bam,
[MASTER_QSPI_0] = &mas_qhm_qspi,
@@ -285,19 +1667,19 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &mas_qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &mas_qnm_snoc,
[SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg,
@@ -357,13 +1739,13 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc,
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc,
[SLAVE_LLCC_CFG] = &slv_qhs_llcc,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_acm_apps,
[MASTER_GPU_TCU] = &mas_acm_gpu_tcu,
[MASTER_SYS_TCU] = &mas_acm_sys_tcu,
@@ -384,17 +1766,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &mas_ipa_core_master,
[SLAVE_IPA_CORE] = &slv_ipa_core_slave,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &mas_llcc_mc,
[SLAVE_EBI_CH0] = &slv_ebi,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1,
@@ -410,7 +1792,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc,
@@ -503,97 +1885,25 @@ static const struct qcom_icc_desc sc8180x_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
-}
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node * const qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &mas_qup_core_0,
+ [MASTER_QUP_CORE_1] = &mas_qup_core_1,
+ [MASTER_QUP_CORE_2] = &mas_qup_core_2,
+ [SLAVE_QUP_CORE_0] = &slv_qup_core_0,
+ [SLAVE_QUP_CORE_1] = &slv_qup_core_1,
+ [SLAVE_QUP_CORE_2] = &slv_qup_core_2,
+};
+
+static const struct qcom_icc_desc sc8180x_qup_virt = {
+ .nodes = qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+ .bcms = qup_virt_bcms,
+ .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc },
@@ -606,14 +1916,15 @@ static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt },
{ .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt },
{ .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc },
+ { .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt },
{ .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc },
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8180x",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h
index e70cf7032f80..2eafd35543c7 100644
--- a/drivers/interconnect/qcom/sc8180x.h
+++ b/drivers/interconnect/qcom/sc8180x.h
@@ -171,4 +171,11 @@
#define SC8180X_MASTER_OSM_L3_APPS 161
#define SC8180X_SLAVE_OSM_L3 162
+#define SC8180X_MASTER_QUP_CORE_0 163
+#define SC8180X_MASTER_QUP_CORE_1 164
+#define SC8180X_MASTER_QUP_CORE_2 165
+#define SC8180X_SLAVE_QUP_CORE_0 166
+#define SC8180X_SLAVE_QUP_CORE_1 167
+#define SC8180X_SLAVE_QUP_CORE_2 168
+
#endif
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
new file mode 100644
index 000000000000..507fe5f89791
--- /dev/null
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -0,0 +1,2438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc8280xp.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc8280xp.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SC8280XP_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SC8280XP_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SC8280XP_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SC8280XP_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SC8280XP_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_1 = {
+ .name = "xm_emac_1",
+ .id = SC8280XP_MASTER_EMAC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SC8280XP_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SC8280XP_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SC8280XP_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SC8280XP_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .id = SC8280XP_MASTER_USB3_MP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host0 = {
+ .name = "xm_usb4_host0",
+ .id = SC8280XP_MASTER_USB4_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host1 = {
+ .name = "xm_usb4_host1",
+ .id = SC8280XP_MASTER_USB4_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SC8280XP_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SC8280XP_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SC8280XP_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SC8280XP_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sensorss_q6 = {
+ .name = "qxm_sensorss_q6",
+ .id = SC8280XP_MASTER_SENSORS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SC8280XP_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_0 = {
+ .name = "xm_emac_0",
+ .id = SC8280XP_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SC8280XP_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SC8280XP_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2a = {
+ .name = "xm_pcie3_2a",
+ .id = SC8280XP_MASTER_PCIE_2A,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2b = {
+ .name = "xm_pcie3_2b",
+ .id = SC8280XP_MASTER_PCIE_2B,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3a = {
+ .name = "xm_pcie3_3a",
+ .id = SC8280XP_MASTER_PCIE_3A,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3b = {
+ .name = "xm_pcie3_3b",
+ .id = SC8280XP_MASTER_PCIE_3B,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_4 = {
+ .name = "xm_pcie3_4",
+ .id = SC8280XP_MASTER_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SC8280XP_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SC8280XP_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SC8280XP_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node ipa_core_master = {
+ .name = "ipa_core_master",
+ .id = SC8280XP_MASTER_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_IPA_CORE },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SC8280XP_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 76,
+ .links = { SC8280XP_SLAVE_AHB2PHY_0,
+ SC8280XP_SLAVE_AHB2PHY_1,
+ SC8280XP_SLAVE_AHB2PHY_2,
+ SC8280XP_SLAVE_AOSS,
+ SC8280XP_SLAVE_APPSS,
+ SC8280XP_SLAVE_CAMERA_CFG,
+ SC8280XP_SLAVE_CLK_CTL,
+ SC8280XP_SLAVE_CDSP_CFG,
+ SC8280XP_SLAVE_CDSP1_CFG,
+ SC8280XP_SLAVE_RBCPR_CX_CFG,
+ SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+ SC8280XP_SLAVE_RBCPR_MX_CFG,
+ SC8280XP_SLAVE_CPR_NSPCX,
+ SC8280XP_SLAVE_CRYPTO_0_CFG,
+ SC8280XP_SLAVE_CX_RDPM,
+ SC8280XP_SLAVE_DCC_CFG,
+ SC8280XP_SLAVE_DISPLAY_CFG,
+ SC8280XP_SLAVE_DISPLAY1_CFG,
+ SC8280XP_SLAVE_EMAC_CFG,
+ SC8280XP_SLAVE_EMAC1_CFG,
+ SC8280XP_SLAVE_GFX3D_CFG,
+ SC8280XP_SLAVE_HWKM,
+ SC8280XP_SLAVE_IMEM_CFG,
+ SC8280XP_SLAVE_IPA_CFG,
+ SC8280XP_SLAVE_IPC_ROUTER_CFG,
+ SC8280XP_SLAVE_LPASS,
+ SC8280XP_SLAVE_MX_RDPM,
+ SC8280XP_SLAVE_MXC_RDPM,
+ SC8280XP_SLAVE_PCIE_0_CFG,
+ SC8280XP_SLAVE_PCIE_1_CFG,
+ SC8280XP_SLAVE_PCIE_2A_CFG,
+ SC8280XP_SLAVE_PCIE_2B_CFG,
+ SC8280XP_SLAVE_PCIE_3A_CFG,
+ SC8280XP_SLAVE_PCIE_3B_CFG,
+ SC8280XP_SLAVE_PCIE_4_CFG,
+ SC8280XP_SLAVE_PCIE_RSC_CFG,
+ SC8280XP_SLAVE_PDM,
+ SC8280XP_SLAVE_PIMEM_CFG,
+ SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+ SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+ SC8280XP_SLAVE_QDSS_CFG,
+ SC8280XP_SLAVE_QSPI_0,
+ SC8280XP_SLAVE_QUP_0,
+ SC8280XP_SLAVE_QUP_1,
+ SC8280XP_SLAVE_QUP_2,
+ SC8280XP_SLAVE_SDCC_2,
+ SC8280XP_SLAVE_SDCC_4,
+ SC8280XP_SLAVE_SECURITY,
+ SC8280XP_SLAVE_SMMUV3_CFG,
+ SC8280XP_SLAVE_SMSS_CFG,
+ SC8280XP_SLAVE_SPSS_CFG,
+ SC8280XP_SLAVE_TCSR,
+ SC8280XP_SLAVE_TLMM,
+ SC8280XP_SLAVE_UFS_CARD_CFG,
+ SC8280XP_SLAVE_UFS_MEM_CFG,
+ SC8280XP_SLAVE_USB3_0,
+ SC8280XP_SLAVE_USB3_1,
+ SC8280XP_SLAVE_USB3_MP,
+ SC8280XP_SLAVE_USB4_0,
+ SC8280XP_SLAVE_USB4_1,
+ SC8280XP_SLAVE_VENUS_CFG,
+ SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+ SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+ SC8280XP_SLAVE_A1NOC_CFG,
+ SC8280XP_SLAVE_A2NOC_CFG,
+ SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+ SC8280XP_SLAVE_DDRSS_CFG,
+ SC8280XP_SLAVE_CNOC_MNOC_CFG,
+ SC8280XP_SLAVE_SNOC_CFG,
+ SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+ SC8280XP_SLAVE_IMEM,
+ SC8280XP_SLAVE_PIMEM,
+ SC8280XP_SLAVE_SERVICE_CNOC,
+ SC8280XP_SLAVE_QDSS_STM,
+ SC8280XP_SLAVE_SMSS,
+ SC8280XP_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SC8280XP_SLAVE_PCIE_0,
+ SC8280XP_SLAVE_PCIE_1,
+ SC8280XP_SLAVE_PCIE_2A,
+ SC8280XP_SLAVE_PCIE_2B,
+ SC8280XP_SLAVE_PCIE_3A,
+ SC8280XP_SLAVE_PCIE_3B,
+ SC8280XP_SLAVE_PCIE_4
+ },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+ .name = "qnm_cnoc_dc_noc",
+ .id = SC8280XP_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_LLCC_CFG,
+ SC8280XP_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SC8280XP_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+ .name = "alm_pcie_tcu",
+ .id = SC8280XP_MASTER_PCIE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SC8280XP_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SC8280XP_MASTER_APPSS_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc0 = {
+ .name = "qnm_cmpnoc0",
+ .id = SC8280XP_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc1 = {
+ .name = "qnm_cmpnoc1",
+ .id = SC8280XP_MASTER_COMPUTE_NOC_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SC8280XP_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+ SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+ SC8280XP_SLAVE_SERVICE_GEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SC8280XP_MASTER_GFX3D,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SC8280XP_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SC8280XP_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SC8280XP_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SC8280XP_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SC8280XP_SLAVE_LPASS_CORE_CFG,
+ SC8280XP_SLAVE_LPASS_LPI_CFG,
+ SC8280XP_SLAVE_LPASS_MPU_CFG,
+ SC8280XP_SLAVE_LPASS_TOP_CFG,
+ SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+ .name = "qxm_lpass_dsp",
+ .id = SC8280XP_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SC8280XP_SLAVE_LPASS_TOP_CFG,
+ SC8280XP_SLAVE_LPASS_SNOC,
+ SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SC8280XP_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SC8280XP_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_0 = {
+ .name = "qnm_mdp0_0",
+ .id = SC8280XP_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_1 = {
+ .name = "qnm_mdp0_1",
+ .id = SC8280XP_MASTER_MDP1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_0 = {
+ .name = "qnm_mdp1_0",
+ .id = SC8280XP_MASTER_MDP_CORE1_0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_1 = {
+ .name = "qnm_mdp1_1",
+ .id = SC8280XP_MASTER_MDP_CORE1_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SC8280XP_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_rot_0 = {
+ .name = "qnm_rot_0",
+ .id = SC8280XP_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_rot_1 = {
+ .name = "qnm_rot_1",
+ .id = SC8280XP_MASTER_ROTATOR_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SC8280XP_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+ .name = "qnm_video1",
+ .id = SC8280XP_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SC8280XP_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp = {
+ .name = "qxm_camnoc_icp",
+ .id = SC8280XP_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SC8280XP_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SC8280XP_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SC8280XP_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_CDSP_MEM_NOC,
+ SC8280XP_SLAVE_NSP_XFR
+ },
+};
+
+static struct qcom_icc_node qhm_nspb_noc_config = {
+ .name = "qhm_nspb_noc_config",
+ .id = SC8280XP_MASTER_CDSPB_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC },
+};
+
+static struct qcom_icc_node qxm_nspb = {
+ .name = "qxm_nspb",
+ .id = SC8280XP_MASTER_CDSP_PROC_B,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_CDSPB_MEM_NOC,
+ SC8280XP_SLAVE_NSPB_XFR
+ },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SC8280XP_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SC8280XP_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_noc = {
+ .name = "qnm_aggre_usb_noc",
+ .id = SC8280XP_MASTER_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+ .name = "qnm_lpass_noc",
+ .id = SC8280XP_MASTER_LPASS_ANOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SC8280XP_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SC8280XP_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SC8280XP_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SC8280XP_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_snoc = {
+ .name = "qns_aggre_usb_snoc",
+ .id = SC8280XP_SLAVE_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SC8280XP_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SC8280XP_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_gem_noc = {
+ .name = "qns_pcie_gem_noc",
+ .id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SC8280XP_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ipa_core_slave = {
+ .name = "ipa_core_slave",
+ .id = SC8280XP_SLAVE_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SC8280XP_SLAVE_AHB2PHY_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SC8280XP_SLAVE_AHB2PHY_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = SC8280XP_SLAVE_AHB2PHY_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SC8280XP_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SC8280XP_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SC8280XP_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SC8280XP_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute0_cfg = {
+ .name = "qhs_compute0_cfg",
+ .id = SC8280XP_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_compute1_cfg = {
+ .name = "qhs_compute1_cfg",
+ .id = SC8280XP_SLAVE_CDSP1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CDSPB_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SC8280XP_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SC8280XP_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+ .name = "qhs_cpr_nspcx",
+ .id = SC8280XP_SLAVE_CPR_NSPCX,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SC8280XP_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SC8280XP_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SC8280XP_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display0_cfg = {
+ .name = "qhs_display0_cfg",
+ .id = SC8280XP_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display1_cfg = {
+ .name = "qhs_display1_cfg",
+ .id = SC8280XP_SLAVE_DISPLAY1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac0_cfg = {
+ .name = "qhs_emac0_cfg",
+ .id = SC8280XP_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac1_cfg = {
+ .name = "qhs_emac1_cfg",
+ .id = SC8280XP_SLAVE_EMAC1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SC8280XP_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SC8280XP_SLAVE_HWKM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SC8280XP_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SC8280XP_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SC8280XP_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SC8280XP_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SC8280XP_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mxc_rdpm = {
+ .name = "qhs_mxc_rdpm",
+ .id = SC8280XP_SLAVE_MXC_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SC8280XP_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SC8280XP_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2a_cfg = {
+ .name = "qhs_pcie2a_cfg",
+ .id = SC8280XP_SLAVE_PCIE_2A_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2b_cfg = {
+ .name = "qhs_pcie2b_cfg",
+ .id = SC8280XP_SLAVE_PCIE_2B_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+ .name = "qhs_pcie3a_cfg",
+ .id = SC8280XP_SLAVE_PCIE_3A_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+ .name = "qhs_pcie3b_cfg",
+ .id = SC8280XP_SLAVE_PCIE_3B_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .id = SC8280XP_SLAVE_PCIE_4_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+ .name = "qhs_pcie_rsc_cfg",
+ .id = SC8280XP_SLAVE_PCIE_RSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SC8280XP_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SC8280XP_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper_cfg = {
+ .name = "qhs_pka_wrapper_cfg",
+ .id = SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
+ .name = "qhs_pmu_wrapper_cfg",
+ .id = SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SC8280XP_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SC8280XP_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SC8280XP_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SC8280XP_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SC8280XP_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SC8280XP_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SC8280XP_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SC8280XP_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .id = SC8280XP_SLAVE_SMMUV3_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_smss_cfg = {
+ .name = "qhs_smss_cfg",
+ .id = SC8280XP_SLAVE_SMSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SC8280XP_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SC8280XP_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SC8280XP_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SC8280XP_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SC8280XP_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SC8280XP_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SC8280XP_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp = {
+ .name = "qhs_usb3_mp",
+ .id = SC8280XP_SLAVE_USB3_MP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_0 = {
+ .name = "qhs_usb4_host_0",
+ .id = SC8280XP_SLAVE_USB4_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_1 = {
+ .name = "qhs_usb4_host_1",
+ .id = SC8280XP_SLAVE_USB4_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SC8280XP_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = {
+ .name = "qhs_vsense_ctrl_r_cfg",
+ .id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SC8280XP_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SC8280XP_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = {
+ .name = "qns_anoc_pcie_bridge_cfg",
+ .id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SC8280XP_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SC8280XP_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SC8280XP_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_sf_bridge_cfg = {
+ .name = "qns_snoc_sf_bridge_cfg",
+ .id = SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SC8280XP_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SC8280XP_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SC8280XP_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SC8280XP_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SC8280XP_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2a = {
+ .name = "xs_pcie_2a",
+ .id = SC8280XP_SLAVE_PCIE_2A,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2b = {
+ .name = "xs_pcie_2b",
+ .id = SC8280XP_SLAVE_PCIE_2B,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+ .name = "xs_pcie_3a",
+ .id = SC8280XP_SLAVE_PCIE_3A,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+ .name = "xs_pcie_3b",
+ .id = SC8280XP_SLAVE_PCIE_3B,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .id = SC8280XP_SLAVE_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SC8280XP_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_smss = {
+ .name = "xs_smss",
+ .id = SC8280XP_SLAVE_SMSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SC8280XP_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SC8280XP_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SC8280XP_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SC8280XP_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SC8280XP_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SC8280XP_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SC8280XP_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SC8280XP_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SC8280XP_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+ .name = "qns_sysnoc",
+ .id = SC8280XP_SLAVE_LPASS_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SC8280XP_SLAVE_EBI1,
+ .channels = 8,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SC8280XP_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SC8280XP_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qxs_nsp_xfr = {
+ .name = "qxs_nsp_xfr",
+ .id = SC8280XP_SLAVE_NSP_XFR,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SC8280XP_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nspb_gemnoc = {
+ .name = "qns_nspb_gemnoc",
+ .id = SC8280XP_SLAVE_CDSPB_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_COMPUTE_NOC_1 },
+};
+
+static struct qcom_icc_node qxs_nspb_xfr = {
+ .name = "qxs_nspb_xfr",
+ .id = SC8280XP_SLAVE_NSPB_XFR,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nspb_noc = {
+ .name = "service_nspb_noc",
+ .id = SC8280XP_SLAVE_SERVICE_NSPB_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SC8280XP_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 9,
+ .nodes = { &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie,
+ &xs_pcie_0,
+ &xs_pcie_1,
+ &xs_pcie_2a,
+ &xs_pcie_2b,
+ &xs_pcie_3a,
+ &xs_pcie_3b,
+ &xs_pcie_4
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 67,
+ .nodes = { &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_ahb2phy2,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute0_cfg,
+ &qhs_compute1_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display0_cfg,
+ &qhs_display1_cfg,
+ &qhs_emac0_cfg,
+ &qhs_emac1_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mx_rdpm,
+ &qhs_mxc_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pcie2a_cfg,
+ &qhs_pcie2b_cfg,
+ &qhs_pcie3a_cfg,
+ &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg,
+ &qhs_pcie_rsc_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_security,
+ &qhs_smmuv3_cfg,
+ &qhs_smss_cfg,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_usb3_mp,
+ &qhs_usb4_host_0,
+ &qhs_usb4_host_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_vsense_ctrl_r_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_anoc_pcie_bridge_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &qns_snoc_sf_bridge_cfg,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+ .name = "CN2",
+ .num_nodes = 4,
+ .nodes = { &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn3 = {
+ .name = "CN3",
+ .num_nodes = 3,
+ .nodes = { &qxs_imem,
+ &xs_smss,
+ &xs_sys_tcu_cfg
+ },
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+ .name = "IP0",
+ .num_nodes = 1,
+ .nodes = { &ipa_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 5,
+ .nodes = { &qnm_camnoc_hf,
+ &qnm_mdp0_0,
+ &qnm_mdp0_1,
+ &qnm_mdp1_0,
+ &qns_mem_noc_hf
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 8,
+ .nodes = { &qnm_rot_0,
+ &qnm_rot_1,
+ &qnm_video0,
+ &qnm_video1,
+ &qnm_video_cvp,
+ &qxm_camnoc_icp,
+ &qxm_camnoc_sf,
+ &qns_mem_noc_sf
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsa0 = {
+ .name = "NSA0",
+ .num_nodes = 2,
+ .nodes = { &qns_nsp_gemnoc,
+ &qxs_nsp_xfr
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsa1 = {
+ .name = "NSA1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_nsb0 = {
+ .name = "NSB0",
+ .num_nodes = 2,
+ .nodes = { &qns_nspb_gemnoc,
+ &qxs_nspb_xfr
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsb1 = {
+ .name = "NSB1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nspb },
+};
+
+static struct qcom_icc_bcm bcm_pci0 = {
+ .name = "PCI0",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_gem_noc },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 2,
+ .nodes = { &qns_a1noc_snoc,
+ &qnm_aggre1_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 2,
+ .nodes = { &qns_a2noc_snoc,
+ &qnm_aggre2_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 2,
+ .nodes = { &qns_aggre_usb_snoc,
+ &qnm_aggre_usb_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .num_nodes = 2,
+ .nodes = { &qns_sysnoc,
+ &qnm_lpass_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_sn3,
+ &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_EMAC_1] = &xm_emac_1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [MASTER_USB4_0] = &xm_usb4_host0,
+ [MASTER_USB4_1] = &xm_usb4_host1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pci0,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_EMAC] = &xm_emac_0,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_2A] = &xm_pcie3_2a,
+ [MASTER_PCIE_2B] = &xm_pcie3_2b,
+ [MASTER_PCIE_3A] = &xm_pcie3_3a,
+ [MASTER_PCIE_3B] = &xm_pcie3_3b,
+ [MASTER_PCIE_4] = &xm_pcie3_4,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gem_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_ip0,
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sc8280xp_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+ &bcm_cn2,
+ &bcm_cn3,
+ &bcm_sn2,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
+ [SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
+ [SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
+ [SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_HWKM] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2A_CFG] = &qhs_pcie2a_cfg,
+ [SLAVE_PCIE_2B_CFG] = &qhs_pcie2b_cfg,
+ [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+ [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+ [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_SMSS_CFG] = &qhs_smss_cfg,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp,
+ [SLAVE_USB4_0] = &qhs_usb4_host_0,
+ [SLAVE_USB4_1] = &qhs_usb4_host_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_VSENSE_CTRL_R_CFG] = &qhs_vsense_ctrl_r_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_ANOC_PCIE_BRIDGE_CFG] = &qns_anoc_pcie_bridge_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_SNOC_SF_BRIDGE_CFG] = &qns_snoc_sf_bridge_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_2A] = &xs_pcie_2a,
+ [SLAVE_PCIE_2B] = &xs_pcie_2b,
+ [SLAVE_PCIE_3A] = &xs_pcie_3a,
+ [SLAVE_PCIE_3B] = &xs_pcie_3b,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_SMSS] = &xs_smss,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sc8280xp_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
+ [MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sc8280xp_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_MDP0] = &qnm_mdp0_0,
+ [MASTER_MDP1] = &qnm_mdp0_1,
+ [MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
+ [MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_ROTATOR] = &qnm_rot_0,
+ [MASTER_ROTATOR_1] = &qnm_rot_1,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_P1] = &qnm_video1,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
+ &bcm_nsa0,
+ &bcm_nsa1,
+};
+
+static struct qcom_icc_node * const nspa_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_NSP_XFR] = &qxs_nsp_xfr,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspa_noc = {
+ .nodes = nspa_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspa_noc_nodes),
+ .bcms = nspa_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspa_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
+ &bcm_nsb0,
+ &bcm_nsb1,
+};
+
+static struct qcom_icc_node * const nspb_noc_nodes[] = {
+ [MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
+ [MASTER_CDSP_PROC_B] = &qxm_nspb,
+ [SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
+ [SLAVE_NSPB_XFR] = &qxs_nspb_xfr,
+ [SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspb_noc = {
+ .nodes = nspb_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspb_noc_nodes),
+ .bcms = nspb_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspb_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_main_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const system_noc_main_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_USB_NOC_SNOC] = &qnm_aggre_usb_noc,
+ [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_system_noc_main = {
+ .nodes = system_noc_main_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_main_nodes),
+ .bcms = system_noc_main_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_main_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sc8280xp-aggre1-noc", .data = &sc8280xp_aggre1_noc, },
+ { .compatible = "qcom,sc8280xp-aggre2-noc", .data = &sc8280xp_aggre2_noc, },
+ { .compatible = "qcom,sc8280xp-clk-virt", .data = &sc8280xp_clk_virt, },
+ { .compatible = "qcom,sc8280xp-config-noc", .data = &sc8280xp_config_noc, },
+ { .compatible = "qcom,sc8280xp-dc-noc", .data = &sc8280xp_dc_noc, },
+ { .compatible = "qcom,sc8280xp-gem-noc", .data = &sc8280xp_gem_noc, },
+ { .compatible = "qcom,sc8280xp-lpass-ag-noc", .data = &sc8280xp_lpass_ag_noc, },
+ { .compatible = "qcom,sc8280xp-mc-virt", .data = &sc8280xp_mc_virt, },
+ { .compatible = "qcom,sc8280xp-mmss-noc", .data = &sc8280xp_mmss_noc, },
+ { .compatible = "qcom,sc8280xp-nspa-noc", .data = &sc8280xp_nspa_noc, },
+ { .compatible = "qcom,sc8280xp-nspb-noc", .data = &sc8280xp_nspb_noc, },
+ { .compatible = "qcom,sc8280xp-system-noc", .data = &sc8280xp_system_noc_main, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sc8280xp",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h
new file mode 100644
index 000000000000..74d8fa412d65
--- /dev/null
+++ b/drivers/interconnect/qcom/sc8280xp.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+
+#define SC8280XP_MASTER_GPU_TCU 0
+#define SC8280XP_MASTER_PCIE_TCU 1
+#define SC8280XP_MASTER_SYS_TCU 2
+#define SC8280XP_MASTER_APPSS_PROC 3
+#define SC8280XP_MASTER_IPA_CORE 4
+#define SC8280XP_MASTER_LLCC 5
+#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6
+#define SC8280XP_MASTER_CDSP_NOC_CFG 7
+#define SC8280XP_MASTER_CDSPB_NOC_CFG 8
+#define SC8280XP_MASTER_QDSS_BAM 9
+#define SC8280XP_MASTER_QSPI_0 10
+#define SC8280XP_MASTER_QUP_0 11
+#define SC8280XP_MASTER_QUP_1 12
+#define SC8280XP_MASTER_QUP_2 13
+#define SC8280XP_MASTER_A1NOC_CFG 14
+#define SC8280XP_MASTER_A2NOC_CFG 15
+#define SC8280XP_MASTER_A1NOC_SNOC 16
+#define SC8280XP_MASTER_A2NOC_SNOC 17
+#define SC8280XP_MASTER_USB_NOC_SNOC 18
+#define SC8280XP_MASTER_CAMNOC_HF 19
+#define SC8280XP_MASTER_COMPUTE_NOC 20
+#define SC8280XP_MASTER_COMPUTE_NOC_1 21
+#define SC8280XP_MASTER_CNOC_DC_NOC 22
+#define SC8280XP_MASTER_GEM_NOC_CFG 23
+#define SC8280XP_MASTER_GEM_NOC_CNOC 24
+#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC 25
+#define SC8280XP_MASTER_GFX3D 26
+#define SC8280XP_MASTER_LPASS_ANOC 27
+#define SC8280XP_MASTER_MDP0 28
+#define SC8280XP_MASTER_MDP1 29
+#define SC8280XP_MASTER_MDP_CORE1_0 30
+#define SC8280XP_MASTER_MDP_CORE1_1 31
+#define SC8280XP_MASTER_CNOC_MNOC_CFG 32
+#define SC8280XP_MASTER_MNOC_HF_MEM_NOC 33
+#define SC8280XP_MASTER_MNOC_SF_MEM_NOC 34
+#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC 35
+#define SC8280XP_MASTER_ROTATOR 36
+#define SC8280XP_MASTER_ROTATOR_1 37
+#define SC8280XP_MASTER_SNOC_CFG 38
+#define SC8280XP_MASTER_SNOC_GC_MEM_NOC 39
+#define SC8280XP_MASTER_SNOC_SF_MEM_NOC 40
+#define SC8280XP_MASTER_VIDEO_P0 41
+#define SC8280XP_MASTER_VIDEO_P1 42
+#define SC8280XP_MASTER_VIDEO_PROC 43
+#define SC8280XP_MASTER_QUP_CORE_0 44
+#define SC8280XP_MASTER_QUP_CORE_1 45
+#define SC8280XP_MASTER_QUP_CORE_2 46
+#define SC8280XP_MASTER_CAMNOC_ICP 47
+#define SC8280XP_MASTER_CAMNOC_SF 48
+#define SC8280XP_MASTER_CRYPTO 49
+#define SC8280XP_MASTER_IPA 50
+#define SC8280XP_MASTER_LPASS_PROC 51
+#define SC8280XP_MASTER_CDSP_PROC 52
+#define SC8280XP_MASTER_CDSP_PROC_B 53
+#define SC8280XP_MASTER_PIMEM 54
+#define SC8280XP_MASTER_SENSORS_PROC 55
+#define SC8280XP_MASTER_SP 56
+#define SC8280XP_MASTER_EMAC 57
+#define SC8280XP_MASTER_EMAC_1 58
+#define SC8280XP_MASTER_GIC 59
+#define SC8280XP_MASTER_PCIE_0 60
+#define SC8280XP_MASTER_PCIE_1 61
+#define SC8280XP_MASTER_PCIE_2A 62
+#define SC8280XP_MASTER_PCIE_2B 63
+#define SC8280XP_MASTER_PCIE_3A 64
+#define SC8280XP_MASTER_PCIE_3B 65
+#define SC8280XP_MASTER_PCIE_4 66
+#define SC8280XP_MASTER_QDSS_ETR 67
+#define SC8280XP_MASTER_SDCC_2 68
+#define SC8280XP_MASTER_SDCC_4 69
+#define SC8280XP_MASTER_UFS_CARD 70
+#define SC8280XP_MASTER_UFS_MEM 71
+#define SC8280XP_MASTER_USB3_0 72
+#define SC8280XP_MASTER_USB3_1 73
+#define SC8280XP_MASTER_USB3_MP 74
+#define SC8280XP_MASTER_USB4_0 75
+#define SC8280XP_MASTER_USB4_1 76
+#define SC8280XP_SLAVE_EBI1 512
+#define SC8280XP_SLAVE_IPA_CORE 513
+#define SC8280XP_SLAVE_AHB2PHY_0 514
+#define SC8280XP_SLAVE_AHB2PHY_1 515
+#define SC8280XP_SLAVE_AHB2PHY_2 516
+#define SC8280XP_SLAVE_AOSS 517
+#define SC8280XP_SLAVE_APPSS 518
+#define SC8280XP_SLAVE_CAMERA_CFG 519
+#define SC8280XP_SLAVE_CLK_CTL 520
+#define SC8280XP_SLAVE_CDSP_CFG 521
+#define SC8280XP_SLAVE_CDSP1_CFG 522
+#define SC8280XP_SLAVE_RBCPR_CX_CFG 523
+#define SC8280XP_SLAVE_RBCPR_MMCX_CFG 524
+#define SC8280XP_SLAVE_RBCPR_MX_CFG 525
+#define SC8280XP_SLAVE_CPR_NSPCX 526
+#define SC8280XP_SLAVE_CRYPTO_0_CFG 527
+#define SC8280XP_SLAVE_CX_RDPM 528
+#define SC8280XP_SLAVE_DCC_CFG 529
+#define SC8280XP_SLAVE_DISPLAY_CFG 530
+#define SC8280XP_SLAVE_DISPLAY1_CFG 531
+#define SC8280XP_SLAVE_EMAC_CFG 532
+#define SC8280XP_SLAVE_EMAC1_CFG 533
+#define SC8280XP_SLAVE_GFX3D_CFG 534
+#define SC8280XP_SLAVE_HWKM 535
+#define SC8280XP_SLAVE_IMEM_CFG 536
+#define SC8280XP_SLAVE_IPA_CFG 537
+#define SC8280XP_SLAVE_IPC_ROUTER_CFG 538
+#define SC8280XP_SLAVE_LLCC_CFG 539
+#define SC8280XP_SLAVE_LPASS 540
+#define SC8280XP_SLAVE_LPASS_CORE_CFG 541
+#define SC8280XP_SLAVE_LPASS_LPI_CFG 542
+#define SC8280XP_SLAVE_LPASS_MPU_CFG 543
+#define SC8280XP_SLAVE_LPASS_TOP_CFG 544
+#define SC8280XP_SLAVE_MX_RDPM 545
+#define SC8280XP_SLAVE_MXC_RDPM 546
+#define SC8280XP_SLAVE_PCIE_0_CFG 547
+#define SC8280XP_SLAVE_PCIE_1_CFG 548
+#define SC8280XP_SLAVE_PCIE_2A_CFG 549
+#define SC8280XP_SLAVE_PCIE_2B_CFG 550
+#define SC8280XP_SLAVE_PCIE_3A_CFG 551
+#define SC8280XP_SLAVE_PCIE_3B_CFG 552
+#define SC8280XP_SLAVE_PCIE_4_CFG 553
+#define SC8280XP_SLAVE_PCIE_RSC_CFG 554
+#define SC8280XP_SLAVE_PDM 555
+#define SC8280XP_SLAVE_PIMEM_CFG 556
+#define SC8280XP_SLAVE_PKA_WRAPPER_CFG 557
+#define SC8280XP_SLAVE_PMU_WRAPPER_CFG 558
+#define SC8280XP_SLAVE_QDSS_CFG 559
+#define SC8280XP_SLAVE_QSPI_0 560
+#define SC8280XP_SLAVE_QUP_0 561
+#define SC8280XP_SLAVE_QUP_1 562
+#define SC8280XP_SLAVE_QUP_2 563
+#define SC8280XP_SLAVE_SDCC_2 564
+#define SC8280XP_SLAVE_SDCC_4 565
+#define SC8280XP_SLAVE_SECURITY 566
+#define SC8280XP_SLAVE_SMMUV3_CFG 567
+#define SC8280XP_SLAVE_SMSS_CFG 568
+#define SC8280XP_SLAVE_SPSS_CFG 569
+#define SC8280XP_SLAVE_TCSR 570
+#define SC8280XP_SLAVE_TLMM 571
+#define SC8280XP_SLAVE_UFS_CARD_CFG 572
+#define SC8280XP_SLAVE_UFS_MEM_CFG 573
+#define SC8280XP_SLAVE_USB3_0 574
+#define SC8280XP_SLAVE_USB3_1 575
+#define SC8280XP_SLAVE_USB3_MP 576
+#define SC8280XP_SLAVE_USB4_0 577
+#define SC8280XP_SLAVE_USB4_1 578
+#define SC8280XP_SLAVE_VENUS_CFG 579
+#define SC8280XP_SLAVE_VSENSE_CTRL_CFG 580
+#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG 581
+#define SC8280XP_SLAVE_A1NOC_CFG 582
+#define SC8280XP_SLAVE_A1NOC_SNOC 583
+#define SC8280XP_SLAVE_A2NOC_CFG 584
+#define SC8280XP_SLAVE_A2NOC_SNOC 585
+#define SC8280XP_SLAVE_USB_NOC_SNOC 586
+#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG 587
+#define SC8280XP_SLAVE_DDRSS_CFG 588
+#define SC8280XP_SLAVE_GEM_NOC_CNOC 589
+#define SC8280XP_SLAVE_GEM_NOC_CFG 590
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC 591
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF 592
+#define SC8280XP_SLAVE_LLCC 593
+#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC 594
+#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC 595
+#define SC8280XP_SLAVE_CNOC_MNOC_CFG 596
+#define SC8280XP_SLAVE_CDSP_MEM_NOC 597
+#define SC8280XP_SLAVE_CDSPB_MEM_NOC 598
+#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC 599
+#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC 600
+#define SC8280XP_SLAVE_SNOC_CFG 601
+#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG 602
+#define SC8280XP_SLAVE_LPASS_SNOC 603
+#define SC8280XP_SLAVE_QUP_CORE_0 604
+#define SC8280XP_SLAVE_QUP_CORE_1 605
+#define SC8280XP_SLAVE_QUP_CORE_2 606
+#define SC8280XP_SLAVE_IMEM 607
+#define SC8280XP_SLAVE_NSP_XFR 608
+#define SC8280XP_SLAVE_NSPB_XFR 609
+#define SC8280XP_SLAVE_PIMEM 610
+#define SC8280XP_SLAVE_SERVICE_NSP_NOC 611
+#define SC8280XP_SLAVE_SERVICE_NSPB_NOC 612
+#define SC8280XP_SLAVE_SERVICE_A1NOC 613
+#define SC8280XP_SLAVE_SERVICE_A2NOC 614
+#define SC8280XP_SLAVE_SERVICE_CNOC 615
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1 616
+#define SC8280XP_SLAVE_SERVICE_MNOC 617
+#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC 618
+#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC 619
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2 620
+#define SC8280XP_SLAVE_SERVICE_SNOC 621
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC 622
+#define SC8280XP_SLAVE_PCIE_0 623
+#define SC8280XP_SLAVE_PCIE_1 624
+#define SC8280XP_SLAVE_PCIE_2A 625
+#define SC8280XP_SLAVE_PCIE_2B 626
+#define SC8280XP_SLAVE_PCIE_3A 627
+#define SC8280XP_SLAVE_PCIE_3B 628
+#define SC8280XP_SLAVE_PCIE_4 629
+#define SC8280XP_SLAVE_QDSS_STM 630
+#define SC8280XP_SLAVE_SMSS 631
+#define SC8280XP_SLAVE_TCU 632
+
+#endif
+
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 274a7139fe1a..8d879b0bcabc 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -1490,7 +1490,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
.slv_rpm_id = 29,
};
-static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
+static struct qcom_icc_node * const sdm660_a2noc_nodes[] = {
[MASTER_IPA] = &mas_ipa,
[MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
[MASTER_SDCC_1] = &mas_sdcc_1,
@@ -1512,7 +1512,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_a2noc = {
+static const struct qcom_icc_desc sdm660_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
@@ -1521,7 +1521,7 @@ static struct qcom_icc_desc sdm660_a2noc = {
.regmap_cfg = &sdm660_a2noc_regmap_config,
};
-static struct qcom_icc_node *sdm660_bimc_nodes[] = {
+static struct qcom_icc_node * const sdm660_bimc_nodes[] = {
[MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1540,14 +1540,14 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_bimc = {
+static const struct qcom_icc_desc sdm660_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
.regmap_cfg = &sdm660_bimc_regmap_config,
};
-static struct qcom_icc_node *sdm660_cnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
@@ -1594,14 +1594,14 @@ static const struct regmap_config sdm660_cnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_cnoc = {
+static const struct qcom_icc_desc sdm660_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_cnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
.regmap_cfg = &sdm660_cnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_gnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_gnoc_nodes[] = {
[MASTER_APSS_PROC] = &mas_apss_proc,
[SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
[SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
@@ -1615,14 +1615,14 @@ static const struct regmap_config sdm660_gnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_gnoc = {
+static const struct qcom_icc_desc sdm660_gnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_mnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_mnoc_nodes[] = {
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_P0] = &mas_mdp_p0,
@@ -1655,7 +1655,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_mnoc = {
+static const struct qcom_icc_desc sdm660_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
@@ -1664,7 +1664,7 @@ static struct qcom_icc_desc sdm660_mnoc = {
.regmap_cfg = &sdm660_mnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_snoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_snoc_nodes[] = {
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
@@ -1692,7 +1692,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_snoc = {
+static const struct qcom_icc_desc sdm660_snoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_snoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
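[ The hunks in this file, and the sdm845, sdx55, sm8150, sm8250 and sm8350 hunks that follow, all apply the same two changes: the node and BCM pointer arrays gain a "* const" qualifier and the provider descriptors become const. A standalone C illustration of what the "* const" form means; the struct and field names below are made up for the example, not the driver's own: ]

struct node { int bw; };

static struct node a, b;

/* Array of const pointers to (non-const) nodes. */
static struct node * const nodes[] = { &a };

/* nodes[0] = &b;     -> no longer compiles: each array element (the pointer) is const  */
/* nodes[0]->bw = 1;  -> still fine: the pointed-to node itself stays writable          */

[ The descriptors, by contrast, are plain read-only tables, so they can simply become fully const. ]
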
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index d2195079c228..954e7bd13fc4 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -175,12 +175,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn9,
&bcm_qup0,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_SDCC_2] = &xm_sdc2,
@@ -201,13 +201,13 @@ static const struct qcom_icc_desc sdm845_aggre1_noc = {
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn11,
&bcm_qup0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
@@ -230,11 +230,11 @@ static const struct qcom_icc_desc sdm845_aggre2_noc = {
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[MASTER_TIC] = &qhm_tic,
[MASTER_SNOC_CNOC] = &qnm_snoc,
@@ -291,10 +291,10 @@ static const struct qcom_icc_desc sdm845_config_noc = {
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
@@ -307,10 +307,10 @@ static const struct qcom_icc_desc sdm845_dc_noc = {
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gladiator_noc_bcms[] = {
+static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
};
-static struct qcom_icc_node *gladiator_noc_nodes[] = {
+static struct qcom_icc_node * const gladiator_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_l3,
[MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
@@ -325,7 +325,7 @@ static const struct qcom_icc_desc sdm845_gladiator_noc = {
.num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
};
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_mc0,
&bcm_acv,
&bcm_sh0,
@@ -335,7 +335,7 @@ static struct qcom_icc_bcm *mem_noc_bcms[] = {
&bcm_sh5,
};
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
[MASTER_GNOC_MEM_NOC] = &qnm_apps,
@@ -360,14 +360,14 @@ static const struct qcom_icc_desc sdm845_mem_noc = {
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -394,7 +394,7 @@ static const struct qcom_icc_desc sdm845_mmss_noc = {
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -411,7 +411,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index e3ac25a997b7..130a828c3873 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -99,11 +99,11 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
@@ -115,13 +115,13 @@ static const struct qcom_icc_desc sdx55_mc_virt = {
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_AMPSS_M0] = &xm_apps_rdwr,
@@ -137,7 +137,7 @@ static const struct qcom_icc_desc sdx55_mem_noc = {
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_ce0,
&bcm_pn0,
&bcm_pn1,
@@ -156,7 +156,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn11,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_AUDIO] = &qhm_audio,
[MASTER_BLSP_1] = &qhm_blsp1,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
new file mode 100644
index 000000000000..b16d31d53e9b
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx65.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx65.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdx65.h"
+
+DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC);
+DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4);
+DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4);
+DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4);
+DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4);
+DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4);
+DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4);
+DEFINE_QNODE(qns_aggre_noc, SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC);
+DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc);
+DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
+DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
+DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
+DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv);
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx65_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh3,
+};
+
+static struct qcom_icc_node * const mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_APPSS_PROC] = &xm_apps_rdwr,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+};
+
+static const struct qcom_icc_desc sdx65_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pn0,
+ &bcm_pn1,
+ &bcm_pn2,
+ &bcm_pn3,
+ &bcm_pn4,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_AUDIO] = &qhm_audio,
+ [MASTER_BLSP_1] = &qhm_blsp1,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_IPA] = &qnm_ipa,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
+ [MASTER_PCIE_0] = &xm_pcie,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_USB3] = &xm_usb3,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_AUDIO] = &qhs_audio,
+ [SLAVE_BLSP_1] = &qhs_blsp1,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_ECC_CFG] = &qhs_ecc_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_PARF] = &qhs_pcie_parf,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
+ [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+ [SLAVE_ANOC_SNOC] = &qns_aggre_noc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx65_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdx65-mc-virt",
+ .data = &sdx65_mc_virt},
+ { .compatible = "qcom,sdx65-mem-noc",
+ .data = &sdx65_mem_noc},
+ { .compatible = "qcom,sdx65-system-noc",
+ .data = &sdx65_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sdx65",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdx65.h b/drivers/interconnect/qcom/sdx65.h
new file mode 100644
index 000000000000..5dca6e8b32c9
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx65.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+
+#define SDX65_MASTER_TCU_0 0
+#define SDX65_MASTER_LLCC 1
+#define SDX65_MASTER_AUDIO 2
+#define SDX65_MASTER_BLSP_1 3
+#define SDX65_MASTER_QDSS_BAM 4
+#define SDX65_MASTER_QPIC 5
+#define SDX65_MASTER_SNOC_CFG 6
+#define SDX65_MASTER_SPMI_FETCHER 7
+#define SDX65_MASTER_ANOC_SNOC 8
+#define SDX65_MASTER_IPA 9
+#define SDX65_MASTER_MEM_NOC_SNOC 10
+#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11
+#define SDX65_MASTER_SNOC_GC_MEM_NOC 12
+#define SDX65_MASTER_CRYPTO 13
+#define SDX65_MASTER_APPSS_PROC 14
+#define SDX65_MASTER_IPA_PCIE 15
+#define SDX65_MASTER_PCIE_0 16
+#define SDX65_MASTER_QDSS_ETR 17
+#define SDX65_MASTER_SDCC_1 18
+#define SDX65_MASTER_USB3 19
+#define SDX65_SLAVE_EBI1 512
+#define SDX65_SLAVE_AOSS 513
+#define SDX65_SLAVE_APPSS 514
+#define SDX65_SLAVE_AUDIO 515
+#define SDX65_SLAVE_BLSP_1 516
+#define SDX65_SLAVE_CLK_CTL 517
+#define SDX65_SLAVE_CRYPTO_0_CFG 518
+#define SDX65_SLAVE_CNOC_DDRSS 519
+#define SDX65_SLAVE_ECC_CFG 520
+#define SDX65_SLAVE_IMEM_CFG 521
+#define SDX65_SLAVE_IPA_CFG 522
+#define SDX65_SLAVE_CNOC_MSS 523
+#define SDX65_SLAVE_PCIE_PARF 524
+#define SDX65_SLAVE_PDM 525
+#define SDX65_SLAVE_PRNG 526
+#define SDX65_SLAVE_QDSS_CFG 527
+#define SDX65_SLAVE_QPIC 528
+#define SDX65_SLAVE_SDCC_1 529
+#define SDX65_SLAVE_SNOC_CFG 530
+#define SDX65_SLAVE_SPMI_FETCHER 531
+#define SDX65_SLAVE_SPMI_VGI_COEX 532
+#define SDX65_SLAVE_TCSR 533
+#define SDX65_SLAVE_TLMM 534
+#define SDX65_SLAVE_USB3 535
+#define SDX65_SLAVE_USB3_PHY_CFG 536
+#define SDX65_SLAVE_ANOC_SNOC 537
+#define SDX65_SLAVE_LLCC 538
+#define SDX65_SLAVE_MEM_NOC_SNOC 539
+#define SDX65_SLAVE_SNOC_MEM_NOC_GC 540
+#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC 541
+#define SDX65_SLAVE_IMEM 542
+#define SDX65_SLAVE_SERVICE_SNOC 543
+#define SDX65_SLAVE_PCIE_0 544
+#define SDX65_SLAVE_QDSS_STM 545
+#define SDX65_SLAVE_TCU 546
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 745e3c36a61a..1d04a4bfea80 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -186,12 +186,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn3,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_EMAC] = &xm_emac,
@@ -202,21 +202,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8150_aggre1_noc = {
+static const struct qcom_icc_desc sm8150_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn14,
&bcm_sn3,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QSPI] = &qhm_qspi,
@@ -237,53 +237,53 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8150_aggre2_noc = {
+static const struct qcom_icc_desc sm8150_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
-static struct qcom_icc_desc sm8150_camnoc_virt = {
+static const struct qcom_icc_desc sm8150_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co1,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
-static struct qcom_icc_desc sm8150_compute_noc = {
+static const struct qcom_icc_desc sm8150_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -340,30 +340,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sm8150_config_noc = {
+static const struct qcom_icc_desc sm8150_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
-static struct qcom_icc_desc sm8150_dc_noc = {
+static const struct qcom_icc_desc sm8150_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
@@ -371,7 +371,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
&bcm_sh5,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_apps,
[MASTER_GPU_TCU] = &acm_gpu_tcu,
[MASTER_SYS_TCU] = &acm_sys_tcu,
@@ -391,54 +391,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
-static struct qcom_icc_desc sm8150_gem_noc = {
+static const struct qcom_icc_desc sm8150_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &ipa_core_master,
[SLAVE_IPA_CORE] = &ipa_core_slave,
};
-static struct qcom_icc_desc sm8150_ipa_virt = {
+static const struct qcom_icc_desc sm8150_ipa_virt = {
.nodes = ipa_virt_nodes,
.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
.bcms = ipa_virt_bcms,
.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
-static struct qcom_icc_desc sm8150_mc_virt = {
+static const struct qcom_icc_desc sm8150_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -454,14 +454,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sm8150_mmss_noc = {
+static const struct qcom_icc_desc sm8150_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
@@ -475,7 +475,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -495,7 +495,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8150_system_noc = {
+static const struct qcom_icc_desc sm8150_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index aa707582ea01..5cdb058fa095 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -195,12 +195,12 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie);
DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc);
DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn12,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
@@ -216,20 +216,20 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8250_aggre1_noc = {
+static const struct qcom_icc_desc sm8250_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn12,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
@@ -246,35 +246,35 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8250_aggre2_noc = {
+static const struct qcom_icc_desc sm8250_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
-static struct qcom_icc_desc sm8250_compute_noc = {
+static const struct qcom_icc_desc sm8250_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -329,37 +329,37 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sm8250_config_noc = {
+static const struct qcom_icc_desc sm8250_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
-static struct qcom_icc_desc sm8250_dc_noc = {
+static const struct qcom_icc_desc sm8250_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_AMPSS_M0] = &chm_apps,
@@ -379,54 +379,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
-static struct qcom_icc_desc sm8250_gem_noc = {
+static const struct qcom_icc_desc sm8250_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &ipa_core_master,
[SLAVE_IPA_CORE] = &ipa_core_slave,
};
-static struct qcom_icc_desc sm8250_ipa_virt = {
+static const struct qcom_icc_desc sm8250_ipa_virt = {
.nodes = ipa_virt_nodes,
.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
.bcms = ipa_virt_bcms,
.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
-static struct qcom_icc_desc sm8250_mc_virt = {
+static const struct qcom_icc_desc sm8250_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
@@ -442,17 +442,17 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sm8250_mmss_noc = {
+static const struct qcom_icc_desc sm8250_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *npu_noc_bcms[] = {
+static struct qcom_icc_bcm * const npu_noc_bcms[] = {
};
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_CDP] = &amm_npu_sys_cdp_w,
[MASTER_NPU_NOC_CFG] = &qhm_cfg,
@@ -468,14 +468,14 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
-static struct qcom_icc_desc sm8250_npu_noc = {
+static const struct qcom_icc_desc sm8250_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
.bcms = npu_noc_bcms,
.num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
@@ -489,7 +489,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -511,7 +511,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8250_system_noc = {
+static const struct qcom_icc_desc sm8250_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index c79f93a1ac73..5398e7c8d826 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -198,10 +198,10 @@ DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -213,21 +213,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8350_aggre1_noc = {
+static const struct qcom_icc_desc sm8350_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
@@ -244,14 +244,14 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8350_aggre2_noc = {
+static const struct qcom_icc_desc sm8350_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
@@ -259,7 +259,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = {
&bcm_sn4,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -323,30 +323,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8350_config_noc = {
+static const struct qcom_icc_desc sm8350_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
-static struct qcom_icc_desc sm8350_dc_noc = {
+static const struct qcom_icc_desc sm8350_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
@@ -354,7 +354,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
&bcm_sh0_disp,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -379,17 +379,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
-static struct qcom_icc_desc sm8350_gem_noc = {
+static const struct qcom_icc_desc sm8350_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -399,35 +399,35 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sm8350_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
-static struct qcom_icc_desc sm8350_mc_virt = {
+static const struct qcom_icc_desc sm8350_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
@@ -438,7 +438,7 @@ static struct qcom_icc_bcm *mmss_noc_bcms[] = {
&bcm_mm5_disp,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -459,40 +459,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
-static struct qcom_icc_desc sm8350_mmss_noc = {
+static const struct qcom_icc_desc sm8350_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sm8350_compute_noc = {
+static const struct qcom_icc_desc sm8350_compute_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -503,7 +503,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sm8350_system_noc = {
+static const struct qcom_icc_desc sm8350_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index 8d99ee6421df..7e3d372b712f 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1526,10 +1526,10 @@ static struct qcom_icc_bcm bcm_sh1_disp = {
.nodes = { &qnm_pcie_disp },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -1540,18 +1540,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8450_aggre1_noc = {
+static const struct qcom_icc_desc sm8450_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
@@ -1567,20 +1567,20 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8450_aggre2_noc = {
+static const struct qcom_icc_desc sm8450_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
@@ -1589,18 +1589,18 @@ static struct qcom_icc_node *clk_virt_nodes[] = {
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
-static struct qcom_icc_desc sm8450_clk_virt = {
+static const struct qcom_icc_desc sm8450_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1658,21 +1658,21 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8450_config_noc = {
+static const struct qcom_icc_desc sm8450_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
&bcm_sh0_disp,
&bcm_sh1_disp,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -1693,17 +1693,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
-static struct qcom_icc_desc sm8450_gem_noc = {
+static const struct qcom_icc_desc sm8450_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
@@ -1715,42 +1715,42 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sm8450_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8450_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
-static struct qcom_icc_desc sm8450_mc_virt = {
+static const struct qcom_icc_desc sm8450_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm0_disp,
&bcm_mm1_disp,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -1771,36 +1771,36 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
-static struct qcom_icc_desc sm8450_mmss_noc = {
+static const struct qcom_icc_desc sm8450_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sm8450_nsp_noc = {
+static const struct qcom_icc_desc sm8450_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_sn7,
};
-static struct qcom_icc_node *pcie_anoc_nodes[] = {
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
@@ -1808,14 +1808,14 @@ static struct qcom_icc_node *pcie_anoc_nodes[] = {
[SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
};
-static struct qcom_icc_desc sm8450_pcie_anoc = {
+static const struct qcom_icc_desc sm8450_pcie_anoc = {
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -1823,7 +1823,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn4,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -1836,7 +1836,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sm8450_system_noc = {
+static const struct qcom_icc_desc sm8450_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
@@ -1848,7 +1848,7 @@ static int qnoc_probe(struct platform_device *pdev)
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
+ struct qcom_icc_node * const *qnodes;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
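
The sm8250, sm8350 and sm8450 hunks above all apply the same transformation: the per-NoC arrays of node/BCM pointers become arrays of const pointers ("* const"), the wrapping qcom_icc_desc becomes const, and the probe path walks the table through a pointer-to-const-pointer. A minimal userspace sketch of that pattern, with invented type and table names (the real driver structures are much larger), showing that the consumer never needs to modify the tables, which in turn lets the compiler place both the array and the descriptor in read-only data:

#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
};

static struct node node_a = { "a" };
static struct node node_b = { "b" };

/* The pointers stored in the table never change -> '* const'. */
static struct node * const example_nodes[] = {
	&node_a,
	&node_b,
};

struct desc {
	struct node * const *nodes;
	size_t num_nodes;
};

static const struct desc example_desc = {
	.nodes = example_nodes,
	.num_nodes = sizeof(example_nodes) / sizeof(example_nodes[0]),
};

int main(void)
{
	/* mirrors the 'struct qcom_icc_node * const *qnodes' in qnoc_probe() */
	struct node * const *qnodes = example_desc.nodes;
	size_t i;

	for (i = 0; i < example_desc.num_nodes; i++)
		printf("%s\n", qnodes[i]->name);
	return 0;
}
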
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 44fb8843e80e..4ab1038b5482 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -557,7 +557,7 @@ config LOONGSON_LIOINTC
config LOONGSON_HTPIC
bool "Loongson3 HyperTransport PIC Controller"
- depends on MACH_LOONGSON64
+ depends on MACH_LOONGSON64 && MIPS
default y
select IRQ_DOMAIN
select GENERIC_IRQ_CHIP
@@ -565,12 +565,12 @@ config LOONGSON_HTPIC
Support for the Loongson-3 HyperTransport PIC Controller.
config LOONGSON_HTVEC
- bool "Loongson3 HyperTransport Interrupt Vector Controller"
+ bool "Loongson HyperTransport Interrupt Vector Controller"
depends on MACH_LOONGSON64
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
- Support for the Loongson3 HyperTransport Interrupt Vector Controller.
+ Support for the Loongson HyperTransport Interrupt Vector Controller.
config LOONGSON_PCH_PIC
bool "Loongson PCH PIC Controller"
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 649c58391618..aed88857d90f 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -16,7 +16,11 @@
#include <linux/smp.h>
#include <linux/irqchip/chained_irq.h>
+#ifdef CONFIG_MIPS
#include <loongson.h>
+#else
+#include <asm/loongson.h>
+#endif
#define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4
@@ -53,7 +57,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_chip_generic *gc = handler->priv->gc;
- int core = get_ebase_cpunum() % LIOINTC_NUM_CORES;
+ int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES;
u32 pending;
chained_irq_enter(chip, desc);
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 42dc46e3f00f..9aa3fccd71fb 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/leds.h>
-#include <mach/hardware.h>
#include <asm/hardware/locomo.h>
static void locomoled_brightness_set(struct led_classdev *led_cdev,
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9ed9c955add7..2acda9cea0f9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -395,6 +395,13 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
+
+ /*
+ * Retry to update writeback_rate if contention happens for
+ * down_read(dc->writeback_lock) in update_writeback_rate()
+ */
+#define BCH_WBRATE_UPDATE_MAX_SKIPS 15
+ unsigned int rate_update_retry;
};
enum alloc_reserve {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ad9f16689419..e136d6edc1ed 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
int i;
struct bkey *k = NULL;
struct btree_iter iter;
- struct btree_check_state *check_state;
- char name[32];
+ struct btree_check_state check_state;
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,63 +2017,59 @@ int bch_btree_check(struct cache_set *c)
if (c->root->level == 0)
return 0;
- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
- if (!check_state)
- return -ENOMEM;
-
- check_state->c = c;
- check_state->total_threads = bch_btree_chkthread_nr();
- check_state->key_idx = 0;
- spin_lock_init(&check_state->idx_lock);
- atomic_set(&check_state->started, 0);
- atomic_set(&check_state->enough, 0);
- init_waitqueue_head(&check_state->wait);
+ memset(&check_state, 0, sizeof(struct btree_check_state));
+ check_state.c = c;
+ check_state.total_threads = bch_btree_chkthread_nr();
+ check_state.key_idx = 0;
+ spin_lock_init(&check_state.idx_lock);
+ atomic_set(&check_state.started, 0);
+ atomic_set(&check_state.enough, 0);
+ init_waitqueue_head(&check_state.wait);
+ rw_lock(0, c->root, c->root->level);
/*
* Run multiple threads to check btree nodes in parallel,
- * if check_state->enough is non-zero, it means current
+ * if check_state.enough is non-zero, it means current
* running check threads are enough, unncessary to create
* more.
*/
- for (i = 0; i < check_state->total_threads; i++) {
- /* fetch latest check_state->enough earlier */
+ for (i = 0; i < check_state.total_threads; i++) {
+ /* fetch latest check_state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&check_state->enough))
+ if (atomic_read(&check_state.enough))
break;
- check_state->infos[i].result = 0;
- check_state->infos[i].state = check_state;
- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
- atomic_inc(&check_state->started);
+ check_state.infos[i].result = 0;
+ check_state.infos[i].state = &check_state;
- check_state->infos[i].thread =
+ check_state.infos[i].thread =
kthread_run(bch_btree_check_thread,
- &check_state->infos[i],
- name);
- if (IS_ERR(check_state->infos[i].thread)) {
+ &check_state.infos[i],
+ "bch_btrchk[%d]", i);
+ if (IS_ERR(check_state.infos[i].thread)) {
pr_err("fails to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(check_state->infos[i].thread);
+ kthread_stop(check_state.infos[i].thread);
ret = -ENOMEM;
goto out;
}
+ atomic_inc(&check_state.started);
}
/*
* Must wait for all threads to stop.
*/
- wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0);
+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
- for (i = 0; i < check_state->total_threads; i++) {
- if (check_state->infos[i].result) {
- ret = check_state->infos[i].result;
+ for (i = 0; i < check_state.total_threads; i++) {
+ if (check_state.infos[i].result) {
+ ret = check_state.infos[i].result;
goto out;
}
}
out:
- kfree(check_state);
+ rw_unlock(0, c->root);
return ret;
}
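
The bch_btree_check() hunk above replaces the kzalloc()ed btree_check_state with an on-stack one, lets kthread_run() format the thread name directly instead of going through a local snprintf() buffer, and only bumps the started counter once thread creation has actually succeeded. A rough userspace analogue of the stack-state part, using pthreads in place of kthreads and invented names; keeping the shared state on the stack is safe only because the caller joins every worker before the frame goes away, which is also what the wait_event() at the end of bch_btree_check() guarantees:

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

struct check_state {
	pthread_mutex_t lock;
	int next_idx;
};

static void *worker(void *arg)
{
	struct check_state *st = arg;	/* points into the caller's frame */
	int idx;

	pthread_mutex_lock(&st->lock);
	idx = st->next_idx++;
	pthread_mutex_unlock(&st->lock);
	printf("worker got index %d\n", idx);
	return NULL;
}

static int run_check(void)
{
	struct check_state st = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t tid[NR_WORKERS];
	int i, started = 0;

	for (i = 0; i < NR_WORKERS; i++) {
		if (pthread_create(&tid[i], NULL, worker, &st))
			break;		/* count only threads that started */
		started++;
	}
	for (i = 0; i < started; i++)	/* must join before 'st' goes away */
		pthread_join(tid[i], NULL);
	return started ? 0 : -1;
}

int main(void)
{
	return run_check() < 0;
}
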
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 50482107134f..1b5fdbc0d83e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -226,7 +226,7 @@ struct btree_check_info {
int result;
};
-#define BCH_BTR_CHKTHREAD_MAX 64
+#define BCH_BTR_CHKTHREAD_MAX 12
struct btree_check_state {
struct cache_set *c;
int total_threads;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index df5347ea450b..e5da469a4235 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -405,6 +405,11 @@ err:
return ret;
}
+void bch_journal_space_reserve(struct journal *j)
+{
+ j->do_reserve = true;
+}
+
/* Journalling */
static void btree_flush_write(struct cache_set *c)
@@ -621,12 +626,30 @@ static void do_journal_discard(struct cache *ca)
}
}
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+ struct cache *ca = c->cache;
+ struct journal_device *ja = &c->cache->journal;
+ unsigned int n;
+
+ /* In case njournal_buckets is not power of 2 */
+ if (ja->cur_idx >= ja->discard_idx)
+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ else
+ n = ja->discard_idx - ja->cur_idx;
+
+ if (n > (1 + j->do_reserve))
+ return n - (1 + j->do_reserve);
+
+ return 0;
+}
+
static void journal_reclaim(struct cache_set *c)
{
struct bkey *k = &c->journal.key;
struct cache *ca = c->cache;
uint64_t last_seq;
- unsigned int next;
struct journal_device *ja = &ca->journal;
atomic_t p __maybe_unused;
@@ -649,12 +672,10 @@ static void journal_reclaim(struct cache_set *c)
if (c->journal.blocks_free)
goto out;
- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
- /* No space available on this device */
- if (next == ja->discard_idx)
+ if (!free_journal_buckets(c))
goto out;
- ja->cur_idx = next;
+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
k->ptr[0] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
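
free_journal_buckets() above folds the old "is the next bucket the discard index?" test into an explicit count of free journal buckets, minus one bucket that is always in flight and one more that is held back once bch_journal_space_reserve() has set do_reserve. A small standalone sketch of just that arithmetic, with made-up bucket numbers, to show the wraparound case when cur_idx has passed discard_idx:

#include <stdio.h>

/* Illustrative only: values and the helper name are invented, the
 * arithmetic mirrors free_journal_buckets() in the hunk above. */
static unsigned int free_buckets(unsigned int nbuckets,
				 unsigned int cur_idx,
				 unsigned int discard_idx,
				 int do_reserve)
{
	unsigned int n;

	/* Works even when nbuckets is not a power of 2 */
	if (cur_idx >= discard_idx)
		n = nbuckets + discard_idx - cur_idx;
	else
		n = discard_idx - cur_idx;

	/* One bucket is always in use; one more is held back once the
	 * reserve has been enabled after journal replay. */
	if (n > (unsigned int)(1 + do_reserve))
		return n - (1 + do_reserve);
	return 0;
}

int main(void)
{
	/* 7 journal buckets, writer at index 5, discarder at index 2 */
	printf("%u\n", free_buckets(7, 5, 2, 1));	/* prints 2 */
	return 0;
}

With 7 buckets, the writer at index 5 and the discarder at index 2, four buckets are unclaimed and two remain usable after the in-flight bucket and the reserve are subtracted.
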
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index f2ea34d5f431..cd316b4a1e95 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -105,6 +105,7 @@ struct journal {
spinlock_t lock;
spinlock_t flush_write_lock;
bool btree_flushing;
+ bool do_reserve;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list);
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 9c5dde73da88..f2c5a7e06fa9 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1105,6 +1105,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
* which would call closure_get(&dc->disk.cl)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
ddip->d = d;
/* Count on the bcache device */
ddip->orig_bdev = orig_bdev;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2f49e31142f6..3563d15dbaf2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2127,6 +2127,7 @@ static int run_cache_set(struct cache_set *c)
flash_devs_run(c);
+ bch_journal_space_reserve(&c->journal);
set_bit(CACHE_SET_RUNNING, &c->flags);
return 0;
err:
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 9ee0005874cd..3f0ff3aab6f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -235,19 +235,27 @@ static void update_writeback_rate(struct work_struct *work)
return;
}
- if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
- /*
- * If the whole cache set is idle, set_at_max_writeback_rate()
- * will set writeback rate to a max number. Then it is
- * unncessary to update writeback rate for an idle cache set
- * in maximum writeback rate number(s).
- */
- if (!set_at_max_writeback_rate(c, dc)) {
- down_read(&dc->writeback_lock);
+ /*
+ * If the whole cache set is idle, set_at_max_writeback_rate()
+ * will set writeback rate to a max number. Then it is
+ * unncessary to update writeback rate for an idle cache set
+ * in maximum writeback rate number(s).
+ */
+ if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
+ !set_at_max_writeback_rate(c, dc)) {
+ do {
+ if (!down_read_trylock((&dc->writeback_lock))) {
+ dc->rate_update_retry++;
+ if (dc->rate_update_retry <=
+ BCH_WBRATE_UPDATE_MAX_SKIPS)
+ break;
+ down_read(&dc->writeback_lock);
+ dc->rate_update_retry = 0;
+ }
__update_writeback_rate(dc);
update_gc_after_writeback(c);
up_read(&dc->writeback_lock);
- }
+ } while (0);
}
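
The update_writeback_rate() hunk above stops taking dc->writeback_lock unconditionally: the periodic worker first tries the read lock, skips the update (counting the skip in dc->rate_update_retry) when the lock is contended, and only falls back to a blocking down_read() after BCH_WBRATE_UPDATE_MAX_SKIPS consecutive misses. A minimal userspace sketch of that bounded try-lock pattern, with a pthread rwlock standing in for the kernel rw_semaphore and invented names:

#include <pthread.h>
#include <stdio.h>

#define MAX_SKIPS 15	/* mirrors BCH_WBRATE_UPDATE_MAX_SKIPS */

struct rate_state {
	pthread_rwlock_t lock;
	unsigned int retry;
};

static void periodic_update(struct rate_state *s)
{
	if (pthread_rwlock_tryrdlock(&s->lock) != 0) {
		if (++s->retry <= MAX_SKIPS)
			return;			/* skip this round, try again later */
		pthread_rwlock_rdlock(&s->lock);	/* too many skips: block */
		s->retry = 0;
	}
	/* ... recompute the writeback rate under the read lock ... */
	printf("updated (retry counter now %u)\n", s->retry);
	pthread_rwlock_unlock(&s->lock);
}

int main(void)
{
	struct rate_state s;

	pthread_rwlock_init(&s.lock, NULL);
	s.retry = 0;
	periodic_update(&s);	/* uncontended: updates immediately */
	pthread_rwlock_destroy(&s.lock);
	return 0;
}
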
@@ -805,13 +813,11 @@ static int bch_writeback_thread(void *arg)
/* Init */
#define INIT_KEYS_EACH_TIME 500000
-#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
unsigned int inode;
size_t count;
- struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -827,11 +833,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
KEY_START(k), KEY_SIZE(k));
op->count++;
- if (atomic_read(&b->c->search_inflight) &&
- !(op->count % INIT_KEYS_EACH_TIME)) {
- bkey_copy_key(&op->start, k);
- return -EAGAIN;
- }
+ if (!(op->count % INIT_KEYS_EACH_TIME))
+ cond_resched();
return MAP_CONTINUE;
}
@@ -846,24 +849,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
-
- do {
- ret = bcache_btree(map_keys_recurse,
- k,
- c->root,
- &op.op,
- &op.start,
- sectors_dirty_init_fn,
- 0);
- if (ret == -EAGAIN)
- schedule_timeout_interruptible(
- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
- else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
- break;
- }
- } while (ret == -EAGAIN);
+
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn,
+ 0);
+ if (ret < 0)
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
return ret;
}
@@ -907,7 +902,6 @@ static int bch_dirty_init_thread(void *arg)
goto out;
}
skip_nr--;
- cond_resched();
}
if (p) {
@@ -917,7 +911,6 @@ static int bch_dirty_init_thread(void *arg)
p = NULL;
prev_idx = cur_idx;
- cond_resched();
}
out:
@@ -948,67 +941,56 @@ void bch_sectors_dirty_init(struct bcache_device *d)
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
- struct bch_dirty_init_state *state;
- char name[32];
+ struct bch_dirty_init_state state;
/* Just count root keys if no leaf node */
+ rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid)
sectors_dirty_init_fn(&op.op, c->root, k);
- return;
- }
- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
- if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory\n");
+ rw_unlock(0, c->root);
return;
}
- state->c = c;
- state->d = d;
- state->total_threads = bch_btre_dirty_init_thread_nr();
- state->key_idx = 0;
- spin_lock_init(&state->idx_lock);
- atomic_set(&state->started, 0);
- atomic_set(&state->enough, 0);
- init_waitqueue_head(&state->wait);
-
- for (i = 0; i < state->total_threads; i++) {
- /* Fetch latest state->enough earlier */
+ memset(&state, 0, sizeof(struct bch_dirty_init_state));
+ state.c = c;
+ state.d = d;
+ state.total_threads = bch_btre_dirty_init_thread_nr();
+ state.key_idx = 0;
+ spin_lock_init(&state.idx_lock);
+ atomic_set(&state.started, 0);
+ atomic_set(&state.enough, 0);
+ init_waitqueue_head(&state.wait);
+
+ for (i = 0; i < state.total_threads; i++) {
+ /* Fetch latest state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&state->enough))
+ if (atomic_read(&state.enough))
break;
- state->infos[i].state = state;
- atomic_inc(&state->started);
- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
- state->infos[i].thread =
- kthread_run(bch_dirty_init_thread,
- &state->infos[i],
- name);
- if (IS_ERR(state->infos[i].thread)) {
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(state->infos[i].thread);
+ kthread_stop(state.infos[i].thread);
goto out;
}
+ atomic_inc(&state.started);
}
- /*
- * Must wait for all threads to stop.
- */
- wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0);
-
out:
- kfree(state);
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+ rw_unlock(0, c->root);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -1032,6 +1014,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_rate_fp_term_high = 1000;
dc->writeback_rate_i_term_inverse = 10000;
+ /* For dc->writeback_lock contention in update_writeback_rate() */
+ dc->rate_update_retry = 0;
+
WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 02b2f9df73f6..31df716951f6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -20,7 +20,7 @@
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
-#define BCH_DIRTY_INIT_THRD_MAX 64
+#define BCH_DIRTY_INIT_THRD_MAX 12
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9526ccbedafb..5e41fbae3f6b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, false);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 138a3b25c5c8..6e7797b4e738 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -206,7 +206,6 @@ static void linear_free(struct mddev *mddev, void *priv)
static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
- char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
sector_t start_sector, end_sector, data_offset;
sector_t bio_sector = bio->bi_iter.bi_sector;
@@ -256,10 +255,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
return true;
out_of_bounds:
- pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
+ tmp_dev->rdev->bdev,
(unsigned long long)tmp_dev->rdev->sectors,
(unsigned long long)start_sector);
bio_io_error(bio);
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 1c6dbf92c136..66edf5e72bd6 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -87,10 +87,9 @@ static void multipath_end_request(struct bio *bio)
/*
* oops, IO error:
*/
- char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- pr_info("multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
+ pr_info("multipath: %pg: rescheduling sector %llu\n",
+ rdev->bdev,
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
@@ -154,7 +153,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
- char b[BDEVNAME_SIZE];
if (conf->raid_disks - mddev->degraded <= 1) {
/*
@@ -177,9 +175,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ pr_err("multipath: IO failure on %pg, disabling IO path.\n"
"multipath: Operation continuing on %d IO paths.\n",
- bdevname(rdev->bdev, b),
+ rdev->bdev,
conf->raid_disks - mddev->degraded);
}
@@ -197,12 +195,11 @@ static void print_multipath_conf (struct mpconf *conf)
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- pr_debug(" disk%d, o:%d, dev:%s\n",
+ pr_debug(" disk%d, o:%d, dev:%pg\n",
i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ tmp->rdev->bdev);
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 707e802d0082..8273ac5eef06 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1021,8 +1021,6 @@ EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
- char b[BDEVNAME_SIZE];
-
if (rdev->sb_loaded)
return 0;
@@ -1032,8 +1030,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- pr_err("md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %pg, could not read superblock.\n",
+ rdev->bdev);
return -EINVAL;
}
@@ -1179,7 +1177,6 @@ EXPORT_SYMBOL(md_check_no_bitmap);
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
bool spare_disk = true;
@@ -1198,19 +1195,19 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ret = -EINVAL;
- bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- pr_warn("md: invalid raid superblock magic on %s\n", b);
+ pr_warn("md: invalid raid superblock magic on %pg\n",
+ rdev->bdev);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- pr_warn("Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version, b);
+ pr_warn("Bad version number %d.%d on %pg\n",
+ sb->major_version, sb->minor_version, rdev->bdev);
goto abort;
}
@@ -1218,7 +1215,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- pr_warn("md: invalid superblock checksum on %s\n", b);
+ pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
goto abort;
}
@@ -1250,13 +1247,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!md_uuid_equal(refsb, sb)) {
- pr_warn("md: %s has different UUID to %s\n",
- b, bdevname(refdev->bdev,b2));
+ pr_warn("md: %pg has different UUID to %pg\n",
+ rdev->bdev, refdev->bdev);
goto abort;
}
if (!md_sb_equal(refsb, sb)) {
- pr_warn("md: %s has same UUID but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %pg has same UUID but different superblock to %pg\n",
+ rdev->bdev, refdev->bdev);
goto abort;
}
ev1 = md_event(sb);
@@ -1620,7 +1617,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
int ret;
sector_t sb_start;
sector_t sectors;
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
bool spare_disk = true;
@@ -1664,13 +1660,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- pr_warn("md: invalid superblock checksum on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: invalid superblock checksum on %pg\n",
+ rdev->bdev);
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- pr_warn("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %pg\n",
+ rdev->bdev);
return -EINVAL;
}
if (sb->pad0 ||
@@ -1776,9 +1772,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- pr_warn("md: %s has strangely different superblock to %s\n",
- bdevname(rdev->bdev,b),
- bdevname(refdev->bdev,b2));
+ pr_warn("md: %pg has strangely different superblock to %pg\n",
+ rdev->bdev,
+ refdev->bdev);
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
@@ -2365,7 +2361,6 @@ EXPORT_SYMBOL(md_integrity_register);
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_mddev;
- char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
return 0;
@@ -2376,8 +2371,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- pr_err("%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %pg\n",
+ mdname(mddev), rdev->bdev);
return -ENXIO;
}
@@ -2486,11 +2481,9 @@ static void rdev_delayed_delete(struct work_struct *ws)
static void unbind_rdev_from_array(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
-
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%pg>\n", rdev->bdev);
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
@@ -2543,9 +2536,7 @@ void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
-
- pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2803,8 +2794,6 @@ repeat:
rewrite:
md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
- char b[BDEVNAME_SIZE];
-
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
@@ -2812,8 +2801,8 @@ rewrite:
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- pr_debug("md: (write) %s's sb offset: %llu\n",
- bdevname(rdev->bdev, b),
+ pr_debug("md: (write) %pg's sb offset: %llu\n",
+ rdev->bdev,
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
@@ -2825,8 +2814,8 @@ rewrite:
}
} else
- pr_debug("md: %s (skipping faulty)\n",
- bdevname(rdev->bdev, b));
+ pr_debug("md: %pg (skipping faulty)\n",
+ rdev->bdev);
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
@@ -3701,7 +3690,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
- char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
@@ -3725,8 +3713,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
if (!size) {
- pr_warn("md: %s has zero or unknown size, marking faulty!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
+ rdev->bdev);
err = -EINVAL;
goto abort_free;
}
@@ -3735,14 +3723,14 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
- bdevname(rdev->bdev,b),
+ pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
+ rdev->bdev,
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- pr_warn("md: could not read %s's sb, not importing!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: could not read %pg's sb, not importing!\n",
+ rdev->bdev);
goto abort_free;
}
}
@@ -3765,7 +3753,6 @@ static int analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
- char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
@@ -3777,8 +3764,8 @@ static int analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
+ rdev->bdev);
md_kick_rdev_from_array(rdev);
}
@@ -3796,8 +3783,8 @@ static int analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- pr_warn("md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ pr_warn("md: %s: %pg: only %d devices permitted\n",
+ mdname(mddev), rdev->bdev,
mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
@@ -3805,8 +3792,8 @@ static int analyze_sbs(struct mddev *mddev)
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- pr_warn("md: kicking non-fresh %s from array!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: kicking non-fresh %pg from array!\n",
+ rdev->bdev);
md_kick_rdev_from_array(rdev);
continue;
}
@@ -4844,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
}
mddev_unlock(mddev);
}
@@ -5598,8 +5585,6 @@ static void md_free(struct kobject *ko)
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
kfree(mddev);
}
@@ -5912,7 +5897,6 @@ int md_run(struct mddev *mddev)
/* Warn if this is a potentially silly
* configuration.
*/
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
@@ -5921,10 +5905,10 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_disk ==
rdev2->bdev->bd_disk) {
- pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ rdev->bdev,
+ rdev2->bdev);
warned = 1;
}
}
@@ -6213,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
}
del_timer_sync(&mddev->safemode_timer);
@@ -6285,8 +6269,6 @@ void md_stop(struct mddev *mddev)
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -6452,8 +6434,7 @@ static void autorun_array(struct mddev *mddev)
pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
- char b[BDEVNAME_SIZE];
- pr_cont("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%pg>", rdev->bdev);
}
pr_cont("\n");
@@ -6480,7 +6461,6 @@ static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
- char b[BDEVNAME_SIZE];
pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
@@ -6490,12 +6470,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %pg ...\n", rdev0->bdev);
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- pr_debug("md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %pg ...\n",
+ rdev->bdev);
list_move(&rdev->same_set, &candidates);
}
/*
@@ -6512,8 +6492,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- pr_warn("md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %pg is bad: %d\n",
+ rdev0->bdev, rdev0->preferred_minor);
break;
}
@@ -6526,8 +6506,8 @@ static void autorun_devices(int part)
pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- pr_warn("md: %s already running, cannot run %s\n",
- mdname(mddev), bdevname(rdev0->bdev,b));
+ pr_warn("md: %s already running, cannot run %pg\n",
+ mdname(mddev), rdev0->bdev);
mddev_unlock(mddev);
} else {
pr_debug("md: created %s\n", mdname(mddev));
@@ -6701,7 +6681,6 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
{
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
@@ -6731,9 +6710,9 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- pr_warn("md: %s has different UUID to %s\n",
- bdevname(rdev->bdev,b),
- bdevname(rdev0->bdev,b2));
+ pr_warn("md: %pg has different UUID to %pg\n",
+ rdev->bdev,
+ rdev0->bdev);
export_rdev(rdev);
return -EINVAL;
}
@@ -6908,7 +6887,6 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
if (!mddev->pers)
@@ -6943,14 +6921,13 @@ kick_rdev:
return 0;
busy:
- pr_debug("md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %pg from %s ...\n",
+ rdev->bdev, mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
- char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
@@ -6983,8 +6960,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- pr_warn("md: can not hot-add faulty %s disk to %s!\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
+ rdev->bdev, mdname(mddev));
err = -EINVAL;
goto abort_export;
}
@@ -7011,8 +6988,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
* disable on the whole MD.
*/
if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
- pr_info("%s: Disabling nowait because %s does not support nowait\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_info("%s: Disabling nowait because %pg does not support nowait\n",
+ mdname(mddev), rdev->bdev);
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
}
/*
@@ -7963,17 +7940,22 @@ EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
- struct md_thread *thread = *threadp;
- if (!thread)
- return;
- pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
- /* Locking ensures that mddev_unlock does not wake_up a
+ struct md_thread *thread;
+
+ /*
+ * Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
+ thread = *threadp;
+ if (!thread) {
+ spin_unlock(&pers_lock);
+ return;
+ }
*threadp = NULL;
spin_unlock(&pers_lock);
+ pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
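
The md_unregister_thread() hunk above moves the read of *threadp inside pers_lock, so the pointer is fetched and cleared atomically with respect to any other caller: whoever wins the race stops the thread, and everyone else sees NULL and returns. A userspace sketch of that read-and-clear-under-lock shape, with pthread_mutex/pthread_join standing in for the spinlock and kthread_stop(), and an invented managed_thread type:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct managed_thread {
	pthread_t tid;
};

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

static void unregister_thread(struct managed_thread **threadp)
{
	struct managed_thread *t;

	pthread_mutex_lock(&reg_lock);
	t = *threadp;			/* read the pointer under the lock */
	if (!t) {
		pthread_mutex_unlock(&reg_lock);
		return;			/* already unregistered by someone else */
	}
	*threadp = NULL;		/* clear it before dropping the lock */
	pthread_mutex_unlock(&reg_lock);

	pthread_join(t->tid, NULL);	/* stop/reap outside the lock */
	free(t);
}

int main(void)
{
	struct managed_thread *t = malloc(sizeof(*t));

	if (!t || pthread_create(&t->tid, NULL, worker, NULL))
		return 1;
	unregister_thread(&t);		/* stops the worker, frees and clears t */
	unregister_thread(&t);		/* second call is now a harmless no-op */
	return 0;
}
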
@@ -8012,10 +7994,8 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
- char b[BDEVNAME_SIZE];
i++;
- seq_printf(seq, "%s ",
- bdevname(rdev->bdev,b));
+ seq_printf(seq, "%pg ", rdev->bdev);
}
if (!i)
seq_printf(seq, "<none>");
@@ -8255,9 +8235,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
sectors = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
- char b[BDEVNAME_SIZE];
- seq_printf(seq, " %s[%d]",
- bdevname(rdev->bdev,b), rdev->desc_nr);
+ seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
+
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Journal, &rdev->flags))
@@ -9324,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9359,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
@@ -9432,14 +9411,18 @@ void md_check_recovery(struct mddev *mddev)
}
EXPORT_SYMBOL(md_check_recovery);
-void md_reap_sync_thread(struct mddev *mddev)
+void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
{
struct md_rdev *rdev;
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
+ if (reconfig_mutex_held)
+ mddev_unlock(mddev);
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ if (reconfig_mutex_held)
+ mddev_lock_nointr(mddev);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9652,7 +9635,6 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
struct md_rdev *rdev2, *tmp;
int role, ret;
- char b[BDEVNAME_SIZE];
/*
* If size is changed in another node then we need to
@@ -9676,7 +9658,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (test_bit(Candidate, &rdev2->flags)) {
if (role == MD_DISK_ROLE_FAULTY) {
- pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
+ pr_info("md: Removing Candidate device %pg because add failed\n",
+ rdev2->bdev);
md_kick_rdev_from_array(rdev2);
continue;
}
@@ -9693,8 +9676,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
- pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ pr_info("Activated spare: %pg\n",
+ rdev2->bdev);
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf2cbb17acbd..5f62c46ac2d3 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev);
+extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e11701e394ca..78addfe4a0c9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -37,7 +37,6 @@ static void dump_zones(struct mddev *mddev)
int j, k;
sector_t zone_size = 0;
sector_t zone_start = 0;
- char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
@@ -48,9 +47,8 @@ static void dump_zones(struct mddev *mddev)
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
+ len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
+ conf->devlist[j * raid_disks + k]->bdev);
pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -69,8 +67,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
struct strip_zone *zone;
int cnt;
- char b[BDEVNAME_SIZE];
- char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
unsigned blksize = 512;
@@ -78,9 +74,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
if (!conf)
return -ENOMEM;
rdev_for_each(rdev1, mddev) {
- pr_debug("md/raid0:%s: looking at %s\n",
+ pr_debug("md/raid0:%s: looking at %pg\n",
mdname(mddev),
- bdevname(rdev1->bdev, b));
+ rdev1->bdev);
c = 0;
/* round size to chunk_size */
@@ -92,12 +88,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->bdev->bd_disk->queue));
rdev_for_each(rdev2, mddev) {
- pr_debug("md/raid0:%s: comparing %s(%llu)"
- " with %s(%llu)\n",
+ pr_debug("md/raid0:%s: comparing %pg(%llu)"
+ " with %pg(%llu)\n",
mdname(mddev),
- bdevname(rdev1->bdev,b),
+ rdev1->bdev,
(unsigned long long)rdev1->sectors,
- bdevname(rdev2->bdev,b2),
+ rdev2->bdev,
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
pr_debug("md/raid0:%s: END\n",
@@ -225,15 +221,15 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
if (rdev->sectors <= zone->dev_start) {
- pr_debug("md/raid0:%s: checking %s ... nope\n",
+ pr_debug("md/raid0:%s: checking %pg ... nope\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
continue;
}
- pr_debug("md/raid0:%s: checking %s ..."
+ pr_debug("md/raid0:%s: checking %pg ..."
" contained as device %d\n",
mdname(mddev),
- bdevname(rdev->bdev, b), c);
+ rdev->bdev, c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
@@ -362,7 +358,6 @@ static void free_conf(struct mddev *mddev, struct r0conf *conf)
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
}
static void raid0_free(struct mddev *mddev, void *priv)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 99d5af1362d7..258d4eb2d63c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -402,10 +402,9 @@ static void raid1_end_read_request(struct bio *bio)
/*
* oops, read error:
*/
- char b[BDEVNAME_SIZE];
- pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
@@ -1283,10 +1282,10 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
mirror = conf->mirrors + rdisk;
if (r1bio_existed)
- pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
mdname(mddev),
(unsigned long long)r1_bio->sector,
- bdevname(mirror->rdev->bdev, b));
+ mirror->rdev->bdev);
if (test_bit(WriteMostly, &mirror->rdev->flags) &&
bitmap) {
@@ -1659,7 +1658,6 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
*/
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
unsigned long flags;
@@ -1686,9 +1684,9 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
- pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), rdev->bdev,
mdname(mddev), conf->raid_disks - mddev->degraded);
}
@@ -1706,13 +1704,12 @@ static void print_conf(struct r1conf *conf)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ rdev->bdev);
}
rcu_read_unlock();
}
@@ -2347,7 +2344,6 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
}
d = start;
while (d != read_disk) {
- char b[BDEVNAME_SIZE];
if (d==0)
d = conf->raid_disks * 2;
d--;
@@ -2360,11 +2356,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(sect +
rdev->data_offset),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
} else
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dfa576cdf11c..d589f823feb1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -397,10 +397,9 @@ static void raid10_end_read_request(struct bio *bio)
/*
* oops, read error - keep the refcount on the rdev
*/
- char b[BDEVNAME_SIZE];
- pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r10_bio->sector);
set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
@@ -1187,9 +1186,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
return;
}
if (err_rdev)
- pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
mdname(mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
@@ -1987,7 +1986,6 @@ static int enough(struct r10conf *conf, int ignore)
*/
static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r10conf *conf = mddev->private;
unsigned long flags;
@@ -2010,9 +2008,9 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
"md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), rdev->bdev,
mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
@@ -2032,13 +2030,12 @@ static void print_conf(struct r10conf *conf)
 /* This is only called with ->reconfig_mutex held, so
* rcu protection of rdev is not needed */
for (i = 0; i < conf->geo.raid_disks; i++) {
- char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ rdev->bdev);
}
}
@@ -2691,14 +2688,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
check_decay_read_errors(mddev, rdev);
atomic_inc(&rdev->read_errors);
if (atomic_read(&rdev->read_errors) > max_read_errors) {
- char b[BDEVNAME_SIZE];
- bdevname(rdev->bdev, b);
-
- pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
+ pr_notice("md/raid10:%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), rdev->bdev,
atomic_read(&rdev->read_errors), max_read_errors);
- pr_notice("md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %pg: Failing raid device\n",
+ mdname(mddev), rdev->bdev);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2768,8 +2762,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
/* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) {
- char b[BDEVNAME_SIZE];
-
if (sl==0)
sl = conf->copies;
sl--;
@@ -2788,24 +2780,22 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio,
rdev)),
- bdevname(rdev->bdev, b));
- pr_notice("md/raid10:%s: %s: failing drive\n",
+ rdev->bdev);
+ pr_notice("md/raid10:%s: %pg: failing drive\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
sl = start;
while (sl != r10_bio->read_slot) {
- char b[BDEVNAME_SIZE];
-
if (sl==0)
sl = conf->copies;
sl--;
@@ -2825,23 +2815,23 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
- bdevname(rdev->bdev, b));
- pr_notice("md/raid10:%s: %s: failing drive\n",
+ rdev->bdev);
+ pr_notice("md/raid10:%s: %pg: failing drive\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
break;
case 1:
- pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
atomic_add(s, &rdev->corrected_errors);
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 094a4042589e..83c184eddbda 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3064,11 +3064,10 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
- char b[BDEVNAME_SIZE];
int ret;
- pr_debug("md/raid:%s: using device %s as journal\n",
- mdname(conf->mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %pg as journal\n",
+ mdname(conf->mddev), rdev->bdev);
if (PAGE_SIZE != 4096)
return -EINVAL;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 55d065a87b89..973e2e06f19c 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -798,7 +798,6 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
int data_disks;
int i;
int ret = 0;
- char b[BDEVNAME_SIZE];
unsigned int pp_size = le32_to_cpu(e->pp_size);
unsigned int data_size = le32_to_cpu(e->data_size);
@@ -894,8 +893,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
break;
}
- pr_debug("%s:%*s reading data member disk %s sector %llu\n",
- __func__, indent, "", bdevname(rdev->bdev, b),
+ pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
+ __func__, indent, "", rdev->bdev,
(unsigned long long)sector);
if (!sync_page_io(rdev, sector, block_size, page2,
REQ_OP_READ, 0, false)) {
@@ -942,10 +941,10 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
conf->disks[sh.pd_idx].rdev, 1);
BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
- pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
+ pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
__func__, indent, "",
(unsigned long long)parity_sector,
- bdevname(parity_rdev->bdev, b));
+ parity_rdev->bdev);
if (!sync_page_io(parity_rdev, parity_sector, block_size,
page1, REQ_OP_WRITE, 0, false)) {
pr_debug("%s:%*s parity write error!\n", __func__,
@@ -1255,7 +1254,6 @@ void ppl_exit_log(struct r5conf *conf)
static int ppl_validate_rdev(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
int ppl_data_sectors;
int ppl_size_new;
@@ -1272,8 +1270,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
if (ppl_data_sectors <= 0) {
- pr_warn("md/raid:%s: PPL space too small on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space too small on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -ENOSPC;
}
@@ -1283,16 +1281,16 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
(rdev->ppl.sector >= rdev->data_offset &&
rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
- pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -EINVAL;
}
if (!rdev->mddev->external &&
((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
(rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
- pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -EINVAL;
}
@@ -1463,14 +1461,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
struct ppl_conf *ppl_conf = conf->log_private;
struct ppl_log *log;
int ret = 0;
- char b[BDEVNAME_SIZE];
if (!rdev)
return -EINVAL;
- pr_debug("%s: disk: %d operation: %s dev: %s\n",
+ pr_debug("%s: disk: %d operation: %s dev: %pg\n",
__func__, rdev->raid_disk, add ? "add" : "remove",
- bdevname(rdev->bdev, b));
+ rdev->bdev);
if (rdev->raid_disk < 0)
return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 39038fa8b1c8..5d09256d7f81 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2686,7 +2686,6 @@ static void raid5_end_read_request(struct bio * bi)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
sector_t s;
@@ -2723,10 +2722,10 @@ static void raid5_end_read_request(struct bio * bi)
* any error
*/
pr_info_ratelimited(
- "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %pg)\n",
mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
(unsigned long long)s,
- bdevname(rdev->bdev, b));
+ rdev->bdev);
atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -2743,7 +2742,6 @@ static void raid5_end_read_request(struct bio * bi)
if (atomic_read(&rdev->read_errors))
atomic_set(&rdev->read_errors, 0);
} else {
- const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
int set_bad = 0;
@@ -2752,25 +2750,25 @@ static void raid5_end_read_request(struct bio * bi)
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
pr_warn_ratelimited(
- "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
+ "md/raid:%s: read error on replacement device (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
pr_warn_ratelimited(
- "md/raid:%s: read error not correctable (sector %llu on %s).\n",
+ "md/raid:%s: read error not correctable (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
pr_warn_ratelimited(
- "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes) {
if (!test_bit(Faulty, &rdev->flags)) {
@@ -2778,8 +2776,8 @@ static void raid5_end_read_request(struct bio * bi)
mdname(conf->mddev),
atomic_read(&rdev->read_errors),
conf->max_nr_stripes);
- pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
- mdname(conf->mddev), bdn);
+ pr_warn("md/raid:%s: Too many read errors, failing device %pg.\n",
+ mdname(conf->mddev), rdev->bdev);
}
} else
retry = 1;
@@ -2891,13 +2889,12 @@ static void raid5_end_write_request(struct bio *bi)
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
unsigned long flags;
pr_debug("raid456: error called\n");
- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
+ mdname(mddev), rdev->bdev);
spin_lock_irqsave(&conf->device_lock, flags);
set_bit(Faulty, &rdev->flags);
@@ -7359,9 +7356,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
if (test_bit(In_sync, &rdev->flags)) {
- char b[BDEVNAME_SIZE];
- pr_info("md/raid:%s: device %s operational as raid disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %pg operational as raid disk %d\n",
+ mdname(mddev), rdev->bdev, raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -7877,12 +7873,11 @@ static void print_raid5_conf (struct r5conf *conf)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev)
- pr_debug(" disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%pg\n",
i, !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rcu_read_unlock();
}
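
    The md hunks above all make the same substitution: the %pg printk specifier
    formats a struct block_device directly, so the on-stack BDEVNAME_SIZE buffer
    and the bdevname() call go away. A minimal sketch of the pattern, with the
    helper name report_disk() purely illustrative:

	#include <linux/blkdev.h>
	#include <linux/printk.h>

	/* Hypothetical helper showing the conversion done throughout md above. */
	static void report_disk(struct block_device *bdev, sector_t sector)
	{
		/* Old style: char b[BDEVNAME_SIZE]; pr_info("%s", bdevname(bdev, b)); */

		/* New style: %pg prints the device name (e.g. sda1), no scratch buffer. */
		pr_info("md: error on %pg at sector %llu\n",
			bdev, (unsigned long long)sector);
	}
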
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index c12dda73cdd5..3155e876616d 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -773,7 +773,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, ati_remote->endpoint_in->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->irq_urb, udev, pipe, ati_remote->inbuf,
@@ -784,7 +784,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
/* Set up out_urb */
pipe = usb_sndintpipe(udev, ati_remote->endpoint_out->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->out_urb, udev, pipe, ati_remote->outbuf,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 2dc810f5a73f..0834d5f866fd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1728,7 +1728,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
else
pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
if (!ir)
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 16ba85d7c090..deb85330c940 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -307,7 +307,7 @@ static int streamzap_probe(struct usb_interface *intf,
}
pipe = usb_rcvintpipe(usbdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(usbdev, pipe);
if (maxp == 0) {
dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n",
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
index 98d0b43608ad..7424b2031152 100644
--- a/drivers/media/rc/xbox_remote.c
+++ b/drivers/media/rc/xbox_remote.c
@@ -171,7 +171,7 @@ static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 8c2725e4105b..ee04973cbf93 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -120,7 +120,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
size = size * 15; /* 512 x 8 or 12 or 15 */
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 84602edf3fe8..5136e9e202f1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -340,7 +340,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
dev->int_in.endp->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
dprintk(1, "IR max size: %d\n", size);
ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index e293f6f3d1bc..d855a19551f3 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -570,7 +570,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
dev->isoc_in.endp->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
if (size > dev->isoc_in.maxsize)
size = dev->isoc_in.maxsize;
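
    The media/rc and tm6000 hunks track a USB core API change: usb_maxpacket()
    now takes only the device and the pipe, deriving the direction from the pipe
    instead of a third usb_pipeout() argument. A minimal sketch, assuming an
    interrupt-IN endpoint; the function name and the DATA_BUFSIZE clamp mirror
    the callers above and are illustrative:

	#include <linux/usb.h>

	#define DATA_BUFSIZE 8	/* illustrative clamp, as in the remote drivers above */

	static unsigned int int_in_packet_size(struct usb_device *udev, u8 ep_addr)
	{
		unsigned int pipe = usb_rcvintpipe(udev, ep_addr);
		unsigned int maxp = usb_maxpacket(udev, pipe);	/* two-argument form */

		/* Callers still clamp to their own buffer size. */
		return min_t(unsigned int, maxp, DATA_BUFSIZE);
	}
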
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 3d5b14c60e20..0be5731685b4 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -22,6 +22,8 @@
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#define SCR_REVID 0x08 /* b Revision ID */
@@ -87,8 +89,10 @@
struct tc6393xb {
void __iomem *scr;
+ struct device *dev;
struct gpio_chip gpio;
+ struct gpio_desc *vcc_on;
struct clk *clk; /* 3,6 Mhz */
@@ -497,17 +501,93 @@ static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
+/*
+ * TC6393XB GPIOs as used on TOSA are the only user of this chip.
+ * GPIOs 2, 5, 8 and 13 are not connected.
+ */
+#define TOSA_GPIO_TG_ON 0
+#define TOSA_GPIO_L_MUTE 1
+#define TOSA_GPIO_BL_C20MA 3
+#define TOSA_GPIO_CARD_VCC_ON 4
+#define TOSA_GPIO_CHARGE_OFF 6
+#define TOSA_GPIO_CHARGE_OFF_JC 7
+#define TOSA_GPIO_BAT0_V_ON 9
+#define TOSA_GPIO_BAT1_V_ON 10
+#define TOSA_GPIO_BU_CHRG_ON 11
+#define TOSA_GPIO_BAT_SW_ON 12
+#define TOSA_GPIO_BAT0_TH_ON 14
+#define TOSA_GPIO_BAT1_TH_ON 15
+
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_gpio_lookup, "spi2.0", "tc6393xb",
+ TOSA_GPIO_TG_ON, "tg #pwr", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_bl_gpio_lookup, "i2c-tos-bl", "tc6393xb",
+ TOSA_GPIO_BL_C20MA, "backlight", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_audio_gpio_lookup, "tosa-audio", "tc6393xb",
+ TOSA_GPIO_L_MUTE, NULL, GPIO_ACTIVE_HIGH);
+
+static struct gpiod_lookup_table tosa_battery_gpio_lookup = {
+ .dev_id = "wm97xx-battery",
+ .table = {
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF,
+ "main charge off", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF_JC,
+ "jacket charge off", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_V_ON,
+ "main battery", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_V_ON,
+ "jacket battery", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BU_CHRG_ON,
+ "backup battery", GPIO_ACTIVE_HIGH),
+ /* BAT1 and BAT0 thermistors appear to be swapped */
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_TH_ON,
+ "main battery temp", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_TH_ON,
+ "jacket battery temp", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT_SW_ON,
+ "battery switch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct gpiod_lookup_table *tc6393xb_gpio_lookups[] = {
+ &tosa_lcd_gpio_lookup,
+ &tosa_lcd_bl_gpio_lookup,
+ &tosa_audio_gpio_lookup,
+ &tosa_battery_gpio_lookup,
+};
+
+static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb)
{
- tc6393xb->gpio.label = "tc6393xb";
- tc6393xb->gpio.base = gpio_base;
- tc6393xb->gpio.ngpio = 16;
- tc6393xb->gpio.set = tc6393xb_gpio_set;
- tc6393xb->gpio.get = tc6393xb_gpio_get;
- tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
- tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
-
- return gpiochip_add_data(&tc6393xb->gpio, tc6393xb);
+ struct gpio_chip *gc = &tc6393xb->gpio;
+ struct device *dev = tc6393xb->dev;
+ int ret;
+
+ gc->label = "tc6393xb";
+ gc->base = -1; /* Dynamic allocation */
+ gc->ngpio = 16;
+ gc->set = tc6393xb_gpio_set;
+ gc->get = tc6393xb_gpio_get;
+ gc->direction_input = tc6393xb_gpio_direction_input;
+ gc->direction_output = tc6393xb_gpio_direction_output;
+
+ ret = devm_gpiochip_add_data(dev, gc, tc6393xb);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add GPIO chip\n");
+
+ /* Register descriptor look-ups for consumers */
+ gpiod_add_lookup_tables(tc6393xb_gpio_lookups, ARRAY_SIZE(tc6393xb_gpio_lookups));
+
+ /* Request some of our own GPIOs */
+ tc6393xb->vcc_on = gpiochip_request_own_desc(gc, TOSA_GPIO_CARD_VCC_ON, "VCC ON",
+ GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
+ if (IS_ERR(tc6393xb->vcc_on))
+ return dev_err_probe(dev, PTR_ERR(tc6393xb->vcc_on),
+ "failed to request VCC ON GPIO\n");
+
+ return 0;
}
/*--------------------------------------------------------------------------*/
@@ -617,6 +697,7 @@ static int tc6393xb_probe(struct platform_device *dev)
ret = -ENOMEM;
goto err_kzalloc;
}
+ tc6393xb->dev = &dev->dev;
raw_spin_lock_init(&tc6393xb->lock);
@@ -676,22 +757,12 @@ static int tc6393xb_probe(struct platform_device *dev)
tmio_ioread8(tc6393xb->scr + SCR_REVID),
(unsigned long) iomem->start, tc6393xb->irq);
- tc6393xb->gpio.base = -1;
-
- if (tcpd->gpio_base >= 0) {
- ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
- if (ret)
- goto err_gpio_add;
- }
+ ret = tc6393xb_register_gpio(tc6393xb);
+ if (ret)
+ goto err_gpio_add;
tc6393xb_attach_irq(dev);
- if (tcpd->setup) {
- ret = tcpd->setup(dev);
- if (ret)
- goto err_setup;
- }
-
tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
sizeof(*tcpd->nand_data);
@@ -705,15 +776,8 @@ static int tc6393xb_probe(struct platform_device *dev)
if (!ret)
return 0;
- if (tcpd->teardown)
- tcpd->teardown(dev);
-
-err_setup:
tc6393xb_detach_irq(dev);
-
err_gpio_add:
- if (tc6393xb->gpio.base != -1)
- gpiochip_remove(&tc6393xb->gpio);
tcpd->disable(dev);
err_enable:
clk_disable_unprepare(tc6393xb->clk);
@@ -738,14 +802,8 @@ static int tc6393xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
- if (tcpd->teardown)
- tcpd->teardown(dev);
-
tc6393xb_detach_irq(dev);
- if (tc6393xb->gpio.base != -1)
- gpiochip_remove(&tc6393xb->gpio);
-
ret = tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
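
    The tc6393xb rework drops the platform-data gpio_base in favour of a
    dynamically numbered chip plus gpiod lookup tables, and requests its own VCC
    line via gpiochip_request_own_desc(). A reduced sketch of the registration
    side, assuming "some-consumer" and the line name stand in for the real TOSA
    consumers:

	#include <linux/gpio/driver.h>
	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table example_gpio_lookup = {
		.dev_id = "some-consumer",		/* illustrative consumer device */
		.table = {
			GPIO_LOOKUP("tc6393xb", 4, "card-vcc", GPIO_ACTIVE_HIGH),
			{ },
		},
	};

	static int example_register(struct device *dev, struct gpio_chip *gc, void *data)
	{
		int ret;

		gc->base = -1;			/* let gpiolib pick the numbers */
		ret = devm_gpiochip_add_data(dev, gc, data);
		if (ret)
			return ret;

		/* Make the lines findable by consumers through gpiod_get(). */
		gpiod_add_lookup_table(&example_gpio_lookup);
		return 0;
	}
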
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 92c0611034b0..075f3a36d512 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -530,11 +530,8 @@ exit_done:
}
break;
case OP_SWP:
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
break;
case OP_ADD:
if (altera_check_stack(stack_ptr, 2, &status)) {
@@ -912,34 +909,22 @@ exit_done:
*/
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 7 */
index = 7 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 6 */
index = 6 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* DUPN 8 */
index = 8 + 1;
@@ -950,18 +935,12 @@ exit_done:
/* SWPN 2 */
index = 2 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* DUPN 6 */
index = 6 + 1;
@@ -1075,11 +1054,8 @@ exit_done:
* to swap with top element
*/
index = (args[0]) + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
break;
case OP_DUPN:
/*
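
    The altera-stapl changes collapse the open-coded three-assignment exchanges
    into the kernel's swap() macro, leaving the stack-depth guard untouched. A
    one-function sketch (swap_top() is a made-up name):

	#include <linux/kernel.h>	/* swap() */

	static void swap_top(long *stack, int stack_ptr)
	{
		/* Exchange the two topmost stack slots in place. */
		swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
	}
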
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
index 066b9ef7fcd7..3c081504f38c 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.c
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -757,20 +757,19 @@ static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
u16 q_num,
u16 msg_id)
{
- bool found = false;
- struct bcm_vk_wkent *entry;
+ struct bcm_vk_wkent *entry = NULL, *iter;
spin_lock(&chan->pendq_lock);
- list_for_each_entry(entry, &chan->pendq[q_num], node) {
- if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
- list_del(&entry->node);
- found = true;
+ list_for_each_entry(iter, &chan->pendq[q_num], node) {
+ if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
+ list_del(&iter->node);
+ entry = iter;
bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
break;
}
}
spin_unlock(&chan->pendq_lock);
- return ((found) ? entry : NULL);
+ return entry;
}
s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
@@ -1010,16 +1009,14 @@ ssize_t bcm_vk_read(struct file *p_file,
miscdev);
struct device *dev = &vk->pdev->dev;
struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
- struct bcm_vk_wkent *entry = NULL;
+ struct bcm_vk_wkent *entry = NULL, *iter;
u32 q_num;
u32 rsp_length;
- bool found = false;
if (!bcm_vk_drv_access_ok(vk))
return -EPERM;
dev_dbg(dev, "Buf count %zu\n", count);
- found = false;
/*
* search through the pendq on the to_h chan, and return only those
@@ -1028,13 +1025,13 @@ ssize_t bcm_vk_read(struct file *p_file,
*/
spin_lock(&chan->pendq_lock);
for (q_num = 0; q_num < chan->q_nr; q_num++) {
- list_for_each_entry(entry, &chan->pendq[q_num], node) {
- if (entry->ctx->idx == ctx->idx) {
+ list_for_each_entry(iter, &chan->pendq[q_num], node) {
+ if (iter->ctx->idx == ctx->idx) {
if (count >=
- (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
- list_del(&entry->node);
+ (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
+ list_del(&iter->node);
atomic_dec(&ctx->pend_cnt);
- found = true;
+ entry = iter;
} else {
/* buffer not big enough */
rc = -EMSGSIZE;
@@ -1046,7 +1043,7 @@ ssize_t bcm_vk_read(struct file *p_file,
read_loop_exit:
spin_unlock(&chan->pendq_lock);
- if (found) {
+ if (entry) {
/* retrieve the passed down msg_id */
set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
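
    The bcm_vk (and later fastrpc) hunks follow the tree-wide rule of never
    using the list_for_each_entry() cursor after the loop: the match is
    published through a separate pointer that stays NULL when nothing was
    found. A self-contained sketch of the pattern, with struct item and
    find_item() as illustrative names:

	#include <linux/list.h>
	#include <linux/types.h>

	struct item {
		struct list_head node;
		u16 id;
	};

	static struct item *find_item(struct list_head *head, u16 id)
	{
		struct item *found = NULL, *iter;

		list_for_each_entry(iter, head, node) {
			if (iter->id == id) {
				list_del(&iter->node);	/* unlink, as in dequeue_pending() */
				found = iter;
				break;
			}
		}

		/* NULL when the list was exhausted; never dereference 'iter' here. */
		return found;
	}
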
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index 3f514d77a843..9080f9f150a2 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -317,12 +317,15 @@ static int alcor_pci_probe(struct pci_dev *pdev,
ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells,
ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL);
if (ret < 0)
- goto error_release_regions;
+ goto error_clear_drvdata;
alcor_pci_aspm_ctrl(priv, 0);
return 0;
+error_clear_drvdata:
+ pci_clear_master(pdev);
+ pci_set_drvdata(pdev, NULL);
error_release_regions:
pci_release_regions(pdev);
error_free_ida:
@@ -343,6 +346,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
ida_free(&alcor_pci_idr, priv->id);
pci_release_regions(pdev);
+ pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index a77585ab0f30..749cc5a46d13 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -57,40 +57,6 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
0xFF, driving[drive_sel][2]);
}
-static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
- struct pci_dev *pdev = pcr->pci;
- u32 reg;
-
- /* 0x814~0x817 */
- pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
- pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-
- if (!rts5261_vendor_setting_valid(reg)) {
- /* Not support MMC default */
- pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
- pcr_dbg(pcr, "skip fetch vendor setting\n");
- return;
- }
-
- if (!rts5261_reg_check_mmc_support(reg))
- pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
-
- /* TO do: need to add rtd3 function */
- pcr->rtd3_en = rts5261_reg_to_rtd3(reg);
-
- if (rts5261_reg_check_reverse_socket(reg))
- pcr->flags |= PCR_REVERSE_SOCKET;
-
- /* 0x724~0x727 */
- pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
- pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
- pcr->aspm_en = rts5261_reg_to_aspm(reg);
- pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
- pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
-}
-
static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
@@ -391,11 +357,11 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
}
-static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
- int retval;
- u32 lval, i;
+ u32 lval1, lval2, i;
+ u16 setting_reg1, setting_reg2;
u8 valid, efuse_valid, tmp;
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
@@ -418,26 +384,70 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
efuse_valid = ((tmp & 0x0C) >> 2);
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
- if (efuse_valid == 0) {
- retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
- if (retval != 0)
- pcr_dbg(pcr, "read 0x814 DW fail\n");
- pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
- /* 0x816 */
- valid = (u8)((lval >> 16) & 0x03);
- pcr_dbg(pcr, "0x816: %d\n", valid);
- }
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+ /* 0x816 */
+ valid = (u8)((lval2 >> 16) & 0x03);
+
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
- pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
- lval = lval & 0x00FFFFFF;
- retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
- if (retval != 0)
- pcr_dbg(pcr, "write config fail\n");
+ if (efuse_valid == 2 || efuse_valid == 3) {
+ if (valid == 3) {
+ /* Bypass efuse */
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ /* Use efuse data */
+ setting_reg1 = PCR_SETTING_REG4;
+ setting_reg2 = PCR_SETTING_REG5;
+ }
+ } else if (efuse_valid == 0) {
+ // default
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ }
+
+ pci_read_config_dword(pdev, setting_reg2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+ if (!rts5261_vendor_setting_valid(lval2)) {
+ /* Not support MMC default */
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ if (!rts5261_reg_check_mmc_support(lval2))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
- return retval;
+ pcr->rtd3_en = rts5261_reg_to_rtd3(lval2);
+
+ if (rts5261_reg_check_reverse_socket(lval2))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ pci_read_config_dword(pdev, setting_reg1, &lval1);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(lval1);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1);
+
+ if (setting_reg1 == PCR_SETTING_REG1) {
+ /* store setting */
+ rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+ pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+ lval2 = lval2 & 0x00FFFFFF;
+ pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+ }
}
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
@@ -636,7 +646,6 @@ static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
}
static const struct pcr_ops rts5261_pcr_ops = {
- .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
.turn_on_led = rts5261_turn_on_led,
.turn_off_led = rts5261_turn_off_led,
.extra_init_hw = rts5261_extra_init_hw,
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59eda55d92a3..1ef9b61077c4 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
return 0;
out_init_fail:
+ usb_set_intfdata(ucr->pusb_intf, NULL);
usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
ucr->iobuf_dma);
return ret;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index d80ada8cac09..93ebd174d848 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1606,17 +1606,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_buf *buf, *b;
+ struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
spin_lock(&fl->lock);
- list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
- if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+ if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
+ buf = iter;
break;
- buf = NULL;
+ }
}
spin_unlock(&fl->lock);
@@ -1747,17 +1748,18 @@ err_invoke:
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_map *map = NULL, *m;
+ struct fastrpc_map *map = NULL, *iter, *m;
struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
int err = 0;
u32 sc;
struct device *dev = fl->sctx->dev;
spin_lock(&fl->lock);
- list_for_each_entry_safe(map, m, &fl->maps, node) {
- if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr))
+ list_for_each_entry_safe(iter, m, &fl->maps, node) {
+ if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
+ map = iter;
break;
- map = NULL;
+ }
}
spin_unlock(&fl->lock);
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 6ebe3c7001ff..934a3a4aedc9 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o
+ common/state_dump.o common/memory_mgr.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index a507110f6443..e13b2b39c058 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -160,24 +160,6 @@ static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
}
}
-static void cb_release(struct kref *ref)
-{
- struct hl_device *hdev;
- struct hl_cb *cb;
-
- cb = container_of(ref, struct hl_cb, refcount);
- hdev = cb->hdev;
-
- hl_debugfs_remove_cb(cb);
-
- if (cb->is_mmu_mapped)
- cb_unmap_mem(cb->ctx, cb);
-
- hl_ctx_put(cb->ctx);
-
- cb_do_release(hdev, cb);
-}
-
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
int ctx_id, bool internal_cb)
{
@@ -238,168 +220,175 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
return cb;
}
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
- bool map_cb, u64 *handle)
+struct hl_cb_mmap_mem_alloc_args {
+ struct hl_device *hdev;
+ struct hl_ctx *ctx;
+ u32 cb_size;
+ bool internal_cb;
+ bool map_cb;
+};
+
+static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
- struct hl_cb *cb;
- bool alloc_new_cb = true;
- int rc, ctx_id = ctx->asid;
+ struct hl_cb *cb = buf->private;
- /*
- * Can't use generic function to check this because of special case
- * where we create a CB as part of the reset process
- */
- if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
- dev_warn_ratelimited(hdev->dev,
- "Device is disabled or in reset. Can't create new CBs\n");
- rc = -EBUSY;
- goto out_err;
- }
+ hl_debugfs_remove_cb(cb);
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size %d must be less than %d\n",
- cb_size, SZ_2M);
- rc = -EINVAL;
- goto out_err;
- }
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
+
+ hl_ctx_put(cb->ctx);
- if (!internal_cb) {
+ cb_do_release(cb->hdev, cb);
+}
+
+static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
+{
+ struct hl_cb_mmap_mem_alloc_args *cb_args = args;
+ struct hl_cb *cb;
+ int rc, ctx_id = cb_args->ctx->asid;
+ bool alloc_new_cb = true;
+
+ if (!cb_args->internal_cb) {
/* Minimum allocation must be PAGE SIZE */
- if (cb_size < PAGE_SIZE)
- cb_size = PAGE_SIZE;
+ if (cb_args->cb_size < PAGE_SIZE)
+ cb_args->cb_size = PAGE_SIZE;
if (ctx_id == HL_KERNEL_ASID_ID &&
- cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+ cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
- spin_lock(&hdev->cb_pool_lock);
- if (!list_empty(&hdev->cb_pool)) {
- cb = list_first_entry(&hdev->cb_pool,
+ spin_lock(&cb_args->hdev->cb_pool_lock);
+ if (!list_empty(&cb_args->hdev->cb_pool)) {
+ cb = list_first_entry(&cb_args->hdev->cb_pool,
typeof(*cb), pool_list);
list_del(&cb->pool_list);
- spin_unlock(&hdev->cb_pool_lock);
+ spin_unlock(&cb_args->hdev->cb_pool_lock);
alloc_new_cb = false;
} else {
- spin_unlock(&hdev->cb_pool_lock);
- dev_dbg(hdev->dev, "CB pool is empty\n");
+ spin_unlock(&cb_args->hdev->cb_pool_lock);
+ dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
}
}
}
if (alloc_new_cb) {
- cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
- if (!cb) {
- rc = -ENOMEM;
- goto out_err;
- }
+ cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
+ if (!cb)
+ return -ENOMEM;
}
- cb->hdev = hdev;
- cb->ctx = ctx;
- hl_ctx_get(hdev, cb->ctx);
+ cb->hdev = cb_args->hdev;
+ cb->ctx = cb_args->ctx;
+ cb->buf = buf;
+ cb->buf->mappable_size = cb->size;
+ cb->buf->private = cb;
+
+ hl_ctx_get(cb->ctx);
- if (map_cb) {
+ if (cb_args->map_cb) {
if (ctx_id == HL_KERNEL_ASID_ID) {
- dev_err(hdev->dev,
+ dev_err(cb_args->hdev->dev,
"CB mapping is not supported for kernel context\n");
rc = -EINVAL;
goto release_cb;
}
- rc = cb_map_mem(ctx, cb);
+ rc = cb_map_mem(cb_args->ctx, cb);
if (rc)
goto release_cb;
}
- spin_lock(&mgr->cb_lock);
- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
- spin_unlock(&mgr->cb_lock);
-
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
- goto unmap_mem;
- }
-
- cb->id = (u64) rc;
-
- kref_init(&cb->refcount);
- spin_lock_init(&cb->lock);
-
- /*
- * idr is 32-bit so we can safely OR it with a mask that is above
- * 32 bit
- */
- *handle = cb->id | HL_MMAP_TYPE_CB;
- *handle <<= PAGE_SHIFT;
-
hl_debugfs_add_cb(cb);
return 0;
-unmap_mem:
- if (cb->is_mmu_mapped)
- cb_unmap_mem(cb->ctx, cb);
release_cb:
hl_ctx_put(cb->ctx);
- cb_do_release(hdev, cb);
-out_err:
- *handle = 0;
+ cb_do_release(cb_args->hdev, cb);
return rc;
}
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
+static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
+ struct vm_area_struct *vma, void *args)
{
- struct hl_cb *cb;
- u32 handle;
- int rc = 0;
+ struct hl_cb *cb = buf->private;
- /*
- * handle was given to user to do mmap, I need to shift it back to
- * how the idr module gave it to me
- */
- cb_handle >>= PAGE_SHIFT;
- handle = (u32) cb_handle;
+ return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
+ cb->bus_address, cb->size);
+}
- spin_lock(&mgr->cb_lock);
+static struct hl_mmap_mem_buf_behavior cb_behavior = {
+ .topic = "CB",
+ .mem_id = HL_MMAP_TYPE_CB,
+ .alloc = hl_cb_mmap_mem_alloc,
+ .release = hl_cb_mmap_mem_release,
+ .mmap = hl_cb_mmap,
+};
- cb = idr_find(&mgr->cb_handles, handle);
- if (cb) {
- idr_remove(&mgr->cb_handles, handle);
- spin_unlock(&mgr->cb_lock);
- kref_put(&cb->refcount, cb_release);
- } else {
- spin_unlock(&mgr->cb_lock);
- dev_err(hdev->dev,
- "CB destroy failed, no match to handle 0x%x\n", handle);
- rc = -EINVAL;
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
+ struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+ bool map_cb, u64 *handle)
+{
+ struct hl_cb_mmap_mem_alloc_args args = {
+ .hdev = hdev,
+ .ctx = ctx,
+ .cb_size = cb_size,
+ .internal_cb = internal_cb,
+ .map_cb = map_cb,
+ };
+ struct hl_mmap_mem_buf *buf;
+ int ctx_id = ctx->asid;
+
+ if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't create new CBs\n");
+ return -EBUSY;
}
- return rc;
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size %d must be less than %d\n",
+ cb_size, SZ_2M);
+ return -EINVAL;
+ }
+
+ buf = hl_mmap_mem_buf_alloc(
+ mmg, &cb_behavior,
+ ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
+ if (!buf)
+ return -ENOMEM;
+
+ *handle = buf->handle;
+
+ return 0;
+}
+
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
+{
+ int rc;
+
+ rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
+ if (rc < 0)
+ return rc; /* Invalid handle */
+
+ if (rc == 0)
+ dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);
+
+ return 0;
}
-static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
+static int hl_cb_info(struct hl_mem_mgr *mmg,
+ u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
struct hl_vm_va_block *va_block;
struct hl_cb *cb;
- u32 handle;
int rc = 0;
- /* The CB handle was given to user to do mmap, so need to shift it back
- * to the value which was allocated by the IDR module.
- */
- cb_handle >>= PAGE_SHIFT;
- handle = (u32) cb_handle;
-
- spin_lock(&mgr->cb_lock);
-
- cb = idr_find(&mgr->cb_handles, handle);
+ cb = hl_cb_get(mmg, handle);
if (!cb) {
- dev_err(hdev->dev,
- "CB info failed, no match to handle 0x%x\n", handle);
- rc = -EINVAL;
- goto out;
+ dev_err(mmg->dev,
+ "CB info failed, no match to handle 0x%llx\n", handle);
+ return -EINVAL;
}
if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
@@ -407,7 +396,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
if (va_block) {
*device_va = va_block->start;
} else {
- dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
+ dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
rc = -EINVAL;
goto out;
}
@@ -416,7 +405,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
}
out:
- spin_unlock(&mgr->cb_lock);
+ hl_cb_put(cb);
return rc;
}
@@ -444,7 +433,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.cb_size, HL_MAX_CB_SIZE);
rc = -EINVAL;
} else {
- rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+ rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
args->in.cb_size, false,
!!(args->in.flags & HL_CB_FLAGS_MAP),
&handle);
@@ -455,12 +444,12 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_CB_OP_DESTROY:
- rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
+ rc = hl_cb_destroy(&hpriv->mem_mgr,
args->in.cb_handle);
break;
case HL_CB_OP_INFO:
- rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
+ rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
args->in.flags,
&usage_cnt,
&device_va);
@@ -483,163 +472,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
-static void cb_vm_close(struct vm_area_struct *vma)
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
- struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
- long new_mmap_size;
-
- new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
-
- if (new_mmap_size > 0) {
- cb->mmap_size = new_mmap_size;
- return;
- }
-
- spin_lock(&cb->lock);
- cb->mmap = false;
- spin_unlock(&cb->lock);
-
- hl_cb_put(cb);
- vma->vm_private_data = NULL;
-}
-
-static const struct vm_operations_struct cb_vm_ops = {
- .close = cb_vm_close
-};
-
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
-{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_cb *cb;
- u32 handle, user_cb_size;
- int rc;
-
- /* We use the page offset to hold the idr and thus we need to clear
- * it before doing the mmap itself
- */
- handle = vma->vm_pgoff;
- vma->vm_pgoff = 0;
-
- /* reference was taken here */
- cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
- if (!cb) {
- dev_err(hdev->dev,
- "CB mmap failed, no match to handle 0x%x\n", handle);
- return -EINVAL;
- }
-
- /* Validation check */
- user_cb_size = vma->vm_end - vma->vm_start;
- if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
- dev_err(hdev->dev,
- "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
- vma->vm_end - vma->vm_start, cb->size);
- rc = -EINVAL;
- goto put_cb;
- }
-
- if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
- user_cb_size)) {
- dev_err(hdev->dev,
- "user pointer is invalid - 0x%lx\n",
- vma->vm_start);
-
- rc = -EINVAL;
- goto put_cb;
- }
-
- spin_lock(&cb->lock);
+ struct hl_mmap_mem_buf *buf;
- if (cb->mmap) {
- dev_err(hdev->dev,
- "CB mmap failed, CB already mmaped to user\n");
- rc = -EINVAL;
- goto release_lock;
- }
-
- cb->mmap = true;
-
- spin_unlock(&cb->lock);
-
- vma->vm_ops = &cb_vm_ops;
-
- /*
- * Note: We're transferring the cb reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = cb;
-
- rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
- cb->bus_address, cb->size);
- if (rc) {
- spin_lock(&cb->lock);
- cb->mmap = false;
- goto release_lock;
- }
-
- cb->mmap_size = cb->size;
- vma->vm_pgoff = handle;
-
- return 0;
-
-release_lock:
- spin_unlock(&cb->lock);
-put_cb:
- hl_cb_put(cb);
- return rc;
-}
-
-struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 handle)
-{
- struct hl_cb *cb;
-
- spin_lock(&mgr->cb_lock);
- cb = idr_find(&mgr->cb_handles, handle);
-
- if (!cb) {
- spin_unlock(&mgr->cb_lock);
- dev_warn(hdev->dev,
- "CB get failed, no match to handle 0x%x\n", handle);
+ buf = hl_mmap_mem_buf_get(mmg, handle);
+ if (!buf)
return NULL;
- }
-
- kref_get(&cb->refcount);
-
- spin_unlock(&mgr->cb_lock);
-
- return cb;
+ return buf->private;
}
void hl_cb_put(struct hl_cb *cb)
{
- kref_put(&cb->refcount, cb_release);
-}
-
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
-{
- spin_lock_init(&mgr->cb_lock);
- idr_init(&mgr->cb_handles);
-}
-
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
-{
- struct hl_cb *cb;
- struct idr *idp;
- u32 id;
-
- idp = &mgr->cb_handles;
-
- idr_for_each_entry(idp, cb, id) {
- if (kref_put(&cb->refcount, cb_release) != 1)
- dev_err(hdev->dev,
- "CB %d for CTX ID %d is still alive\n",
- id, cb->ctx->asid);
- }
-
- idr_destroy(&mgr->cb_handles);
+ hl_mmap_mem_buf_put(cb->buf);
}
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
@@ -649,7 +495,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
struct hl_cb *cb;
int rc;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
@@ -657,8 +503,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
return NULL;
}
- cb_handle >>= PAGE_SHIFT;
- cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
+ cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
/* hl_cb_get should never fail here */
if (!cb) {
dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
@@ -669,7 +514,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
return cb;
destroy_cb:
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
return NULL;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index d93ef9f1c45c..fb30b7de4aab 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -407,8 +407,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
- bool next_entry_found = false;
- struct hl_cs *next, *first_cs;
+ struct hl_cs *next = NULL, *iter, *first_cs;
if (!cs_needs_timeout(cs))
return;
@@ -443,13 +442,13 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
spin_lock(&hdev->cs_mirror_lock);
/* queue TDR for next CS */
- list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
- if (cs_needs_timeout(next)) {
- next_entry_found = true;
+ list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
+ if (cs_needs_timeout(iter)) {
+ next = iter;
break;
}
- if (next_entry_found && !next->tdr_active) {
+ if (next && !next->tdr_active) {
next->tdr_active = true;
schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
}
@@ -736,11 +735,10 @@ static void cs_timedout(struct work_struct *work)
hdev = cs->ctx->hdev;
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1);
+ rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1);
if (!rc) {
- hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
- hdev->last_error.cs_timeout_timestamp = ktime_get();
- hdev->last_error.cs_timeout_seq = cs->sequence;
+ hdev->last_error.cs_timeout.timestamp = ktime_get();
+ hdev->last_error.cs_timeout.seq = cs->sequence;
}
switch (cs->type) {
@@ -806,7 +804,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
}
/* increment refcnt for context */
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
cs->ctx = ctx;
cs->submitted = false;
@@ -958,9 +956,9 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
- if (pend->ts_reg_info.ts_buff) {
+ if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
- hl_ts_put(pend->ts_reg_info.ts_buff);
+ hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
hl_cb_put(pend->ts_reg_info.cq_cb);
} else {
pend->fence.error = -EIO;
@@ -1072,17 +1070,14 @@ static int validate_queue_index(struct hl_device *hdev,
}
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
+ struct hl_mem_mgr *mmg,
struct hl_cs_chunk *chunk)
{
struct hl_cb *cb;
- u32 cb_handle;
- cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
-
- cb = hl_cb_get(hdev, cb_mgr, cb_handle);
+ cb = hl_cb_get(mmg, chunk->cb_handle);
if (!cb) {
- dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
+ dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
return NULL;
}
@@ -1344,7 +1339,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (is_kernel_allocated_cb) {
- cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
if (!cb) {
atomic64_inc(
&ctx->cs_counters.validation_drop_cnt);
@@ -1772,7 +1767,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
*/
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size;
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
cs_get(cs);
@@ -1834,7 +1829,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
handle->count = count;
- hl_ctx_get(hdev, hpriv->ctx);
+ hl_ctx_get(hpriv->ctx);
handle->ctx = hpriv->ctx;
mgr = &hpriv->ctx->sig_mgr;
@@ -2528,7 +2523,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (timestamp)
*timestamp = 0;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
fence = hl_ctx_get_fence(ctx, seq);
@@ -2668,7 +2663,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct multi_cs_completion *mcs_compl;
struct hl_device *hdev = hpriv->hdev;
- struct multi_cs_data mcs_data = {0};
+ struct multi_cs_data mcs_data = {};
union hl_wait_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
struct hl_fence **fence_arr;
@@ -2719,7 +2714,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
mcs_data.fence_arr = fence_arr;
mcs_data.arr_len = seq_arr_len;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
/* wait (with timeout) for the first CS to be completed */
mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
@@ -2868,12 +2863,13 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return 0;
}
-static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
+static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_cb *cq_cb,
u64 ts_offset, u64 cq_offset, u64 target_value,
spinlock_t *wait_list_lock,
struct hl_user_pending_interrupt **pend)
{
+ struct hl_ts_buff *ts_buff = buf->private;
struct hl_user_pending_interrupt *requested_offset_record =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
ts_offset;
@@ -2885,7 +2881,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
/* Validate ts_offset not exceeding last max */
if (requested_offset_record > cb_last) {
- dev_err(ts_buff->hdev->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
+ dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
(u64)(uintptr_t)cb_last);
return -EINVAL;
}
@@ -2904,18 +2900,21 @@ start_over:
list_del(&requested_offset_record->wait_list_node);
spin_unlock_irqrestore(wait_list_lock, flags);
- hl_ts_put(requested_offset_record->ts_reg_info.ts_buff);
+ hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
- dev_dbg(ts_buff->hdev->dev, "ts node removed from interrupt list now can re-use\n");
+ dev_dbg(buf->mmg->dev,
+ "ts node removed from interrupt list now can re-use\n");
} else {
- dev_dbg(ts_buff->hdev->dev, "ts node in middle of irq handling\n");
+ dev_dbg(buf->mmg->dev,
+ "ts node in middle of irq handling\n");
/* irq handling in the middle give it time to finish */
spin_unlock_irqrestore(wait_list_lock, flags);
usleep_range(1, 10);
if (++iter_counter == MAX_TS_ITER_NUM) {
- dev_err(ts_buff->hdev->dev, "handling registration interrupt took too long!!\n");
+ dev_err(buf->mmg->dev,
+ "handling registration interrupt took too long!!\n");
return -EINVAL;
}
@@ -2927,7 +2926,7 @@ start_over:
/* Fill up the new registration node info */
requested_offset_record->ts_reg_info.in_use = 1;
- requested_offset_record->ts_reg_info.ts_buff = ts_buff;
+ requested_offset_record->ts_reg_info.buf = buf;
requested_offset_record->ts_reg_info.cq_cb = cq_cb;
requested_offset_record->ts_reg_info.timestamp_kernel_addr =
(u64 *) ts_buff->user_buff_address + ts_offset;
@@ -2937,21 +2936,20 @@ start_over:
*pend = requested_offset_record;
- dev_dbg(ts_buff->hdev->dev, "Found available node in TS kernel CB(0x%llx)\n",
+ dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n",
(u64)(uintptr_t)requested_offset_record);
return 0;
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cb_mgr *cb_mgr, struct hl_ts_mgr *ts_mgr,
+ struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
u64 target_value, struct hl_user_interrupt *interrupt,
bool register_ts_record, u64 ts_handle, u64 ts_offset,
u32 *status, u64 *timestamp)
{
- u32 cq_patched_handle, ts_patched_handle;
struct hl_user_pending_interrupt *pend;
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
unsigned long timeout, flags;
long completion_rc;
@@ -2959,10 +2957,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
timeout = hl_usecs64_to_jiffies(timeout_us);
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
- cq_patched_handle = lower_32_bits(cq_counters_handle >> PAGE_SHIFT);
- cq_cb = hl_cb_get(hdev, cb_mgr, cq_patched_handle);
+ cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
if (!cq_cb) {
rc = -EINVAL;
goto put_ctx;
@@ -2971,16 +2968,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
-
- ts_patched_handle = lower_32_bits(ts_handle >> PAGE_SHIFT);
- ts_buff = hl_ts_get(hdev, ts_mgr, ts_patched_handle);
- if (!ts_buff) {
+ buf = hl_mmap_mem_buf_get(mmg, ts_handle);
+ if (!buf) {
rc = -EINVAL;
goto put_cq_cb;
}
/* Find first available record */
- rc = ts_buff_get_kernel_ts_record(ts_buff, cq_cb, ts_offset,
+ rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
cq_counters_offset, target_value,
&interrupt->wait_list_lock, &pend);
if (rc)
@@ -3087,7 +3082,7 @@ ts_registration_exit:
return rc;
put_ts_buff:
- hl_ts_put(ts_buff);
+ hl_mmap_mem_buf_put(buf);
put_cq_cb:
hl_cb_put(cq_cb);
put_ctx:
@@ -3111,7 +3106,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
timeout = hl_usecs64_to_jiffies(timeout_us);
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
@@ -3249,7 +3244,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
- rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr, &hpriv->ts_mem_mgr,
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
args->in.interrupt_timeout_us, args->in.cq_counters_handle,
args->in.cq_counters_offset,
args->in.target, interrupt,
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index c6360e33bce8..ed2cfd0c6e99 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -262,7 +262,7 @@ err_hw_block_mem_fini:
return rc;
}
-void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
+void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
}
@@ -284,7 +284,7 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
* immediately once we find him
*/
ctx = hpriv->ctx;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
break;
}
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index f18495545854..c6744bfc6da4 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <linux/iommu.h>
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
@@ -125,9 +126,9 @@ static int command_buffers_show(struct seq_file *s, void *data)
}
seq_printf(s,
" %03llu %d 0x%08x %d %d %d\n",
- cb->id, cb->ctx->asid, cb->size,
- kref_read(&cb->refcount),
- cb->mmap, atomic_read(&cb->cs_cnt));
+ cb->buf->handle, cb->ctx->asid, cb->size,
+ kref_read(&cb->buf->refcount),
+ atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
}
spin_unlock(&dev_entry->cb_spinlock);
@@ -369,8 +370,7 @@ static int userptr_lookup_show(struct seq_file *s, void *data)
if (dev_entry->userptr_lookup >= userptr->addr &&
dev_entry->userptr_lookup < userptr->addr + userptr->size) {
total_npages = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
- i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
sg_start = userptr->addr +
total_npages * PAGE_SIZE;
@@ -538,6 +538,39 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 val = entry->memory_scrub_val;
+ int rc;
+
+ if (!hl_device_operational(hdev, NULL)) {
+ dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
+ return -EIO;
+ }
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ if (hdev->is_compute_ctx_active) {
+ mutex_unlock(&hdev->fpriv_list_lock);
+ dev_err(hdev->dev, "can't scrub dram, context exist\n");
+ return -EBUSY;
+ }
+ hdev->is_in_dram_scrub = true;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ rc = hdev->asic_funcs->scrub_device_dram(hdev, val);
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ hdev->is_in_dram_scrub = false;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ if (rc)
+ return rc;
+ return count;
+}
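+
+/*
+ * Usage sketch (illustrative only) for the debugfs files created further
+ * below in hl_debugfs_add_device(): write the desired fill value to
+ * "memory_scrub_val", then write anything to "memory_scrub" to run
+ * scrub_device_dram() with that value.
+ */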
+
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -647,13 +680,105 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
return rc;
}
+static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
+ u64 *val, enum debugfs_access_type acc_type, bool *found)
+{
+ size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+ sizeof(u64) : sizeof(u32);
+ struct pci_mem_region *mem_reg;
+ int i;
+
+ for (i = 0; i < PCI_REGION_NUMBER; i++) {
+ mem_reg = &hdev->pci_mem_region[i];
+ if (!mem_reg->used)
+ continue;
+ if (addr >= mem_reg->region_base &&
+ addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
+ *found = true;
+ return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i,
+ addr, val, acc_type);
+ }
+ }
+ return 0;
+}
+
+static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 offset = prop->device_dma_offset_for_host_access;
+
+ switch (acc_type) {
+ case DEBUGFS_READ32:
+ *val = *(u32 *) phys_to_virt(addr - offset);
+ break;
+ case DEBUGFS_WRITE32:
+ *(u32 *) phys_to_virt(addr - offset) = *val;
+ break;
+ case DEBUGFS_READ64:
+ *val = *(u64 *) phys_to_virt(addr - offset);
+ break;
+ case DEBUGFS_WRITE64:
+ *(u64 *) phys_to_virt(addr - offset) = *val;
+ break;
+ default:
+ dev_err(hdev->dev, "hostmem access-type %d id not supported\n", acc_type);
+ break;
+ }
+}
+
+static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+ sizeof(u64) : sizeof(u32);
+ u64 host_start = hdev->asic_prop.host_base_address;
+ u64 host_end = hdev->asic_prop.host_end_address;
+ bool user_address, found = false;
+ int rc;
+
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, acc_size, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed reading addr %#llx from dev mem (%d)\n",
+ addr, rc);
+ return rc;
+ }
+
+ if (found)
+ return 0;
+
+ if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (addr >= host_start && addr <= host_end - acc_size) {
+ hl_access_host_mem(hdev, addr, val, acc_type);
+ } else {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(hdev->dev, "invalid addr %#llx\n", addr);
+ return rc;
+}
+
static ssize_t hl_data_read32(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 addr = entry->addr;
- bool user_address;
+ u64 value64, addr = entry->addr;
char tmp_buf[32];
ssize_t rc;
u32 val;
@@ -666,18 +791,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
if (*ppos)
return 0;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val);
- if (rc) {
- dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
+ if (rc)
return rc;
- }
+
+ val = value64; /* downcast back to 32 */
sprintf(tmp_buf, "0x%08x\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -689,8 +807,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 addr = entry->addr;
- bool user_address;
+ u64 value64, addr = entry->addr;
u32 value;
ssize_t rc;
@@ -703,19 +820,10 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
if (rc)
return rc;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value);
- if (rc) {
- dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
- value, addr);
+ value64 = value;
+ rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
+ if (rc)
return rc;
- }
return count;
}
@@ -726,7 +834,6 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
- bool user_address;
char tmp_buf[32];
ssize_t rc;
u64 val;
@@ -739,18 +846,9 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
if (*ppos)
return 0;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val);
- if (rc) {
- dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
+ if (rc)
return rc;
- }
sprintf(tmp_buf, "0x%016llx\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -763,7 +861,6 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
- bool user_address;
u64 value;
ssize_t rc;
@@ -776,19 +873,9 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
if (rc)
return rc;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value);
- if (rc) {
- dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
- value, addr);
+ rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
+ if (rc)
return rc;
- }
return count;
}
@@ -829,23 +916,67 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
}
/* Free the previous allocation, if there was any */
- entry->blob_desc.size = 0;
- vfree(entry->blob_desc.data);
+ entry->data_dma_blob_desc.size = 0;
+ vfree(entry->data_dma_blob_desc.data);
- entry->blob_desc.data = vmalloc(size);
- if (!entry->blob_desc.data)
+ entry->data_dma_blob_desc.data = vmalloc(size);
+ if (!entry->data_dma_blob_desc.data)
return -ENOMEM;
rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
- entry->blob_desc.data);
+ entry->data_dma_blob_desc.data);
if (rc) {
dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
- vfree(entry->blob_desc.data);
- entry->blob_desc.data = NULL;
+ vfree(entry->data_dma_blob_desc.data);
+ entry->data_dma_blob_desc.data = NULL;
+ return -EIO;
+ }
+
+ entry->data_dma_blob_desc.size = size;
+
+ return count;
+}
+
+static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 size, trig;
+ ssize_t rc;
+
+ if (hdev->reset_info.in_reset) {
+ dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
+ return 0;
+ }
+ rc = kstrtouint_from_user(buf, count, 10, &trig);
+ if (rc)
+ return rc;
+
+ if (trig != 1) {
+ dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(struct cpucp_monitor_dump);
+
+ /* Free the previous allocation, if there was any */
+ entry->mon_dump_blob_desc.size = 0;
+ vfree(entry->mon_dump_blob_desc.data);
+
+ entry->mon_dump_blob_desc.data = vmalloc(size);
+ if (!entry->mon_dump_blob_desc.data)
+ return -ENOMEM;
+
+ rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to dump monitors\n");
+ vfree(entry->mon_dump_blob_desc.data);
+ entry->mon_dump_blob_desc.data = NULL;
return -EIO;
}
- entry->blob_desc.size = size;
+ entry->mon_dump_blob_desc.size = size;
return count;
}
@@ -1218,6 +1349,11 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return count;
}
+static const struct file_operations hl_mem_scrub_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_memory_scrub,
+};
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1235,6 +1371,11 @@ static const struct file_operations hl_dma_size_fops = {
.write = hl_dma_size_write
};
+static const struct file_operations hl_monitor_dump_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_monitor_dump_trigger
+};
+
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
@@ -1350,8 +1491,10 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!dev_entry->entry_arr)
return;
- dev_entry->blob_desc.size = 0;
- dev_entry->blob_desc.data = NULL;
+ dev_entry->data_dma_blob_desc.size = 0;
+ dev_entry->data_dma_blob_desc.data = NULL;
+ dev_entry->mon_dump_blob_desc.size = 0;
+ dev_entry->mon_dump_blob_desc.data = NULL;
INIT_LIST_HEAD(&dev_entry->file_list);
INIT_LIST_HEAD(&dev_entry->cb_list);
@@ -1370,6 +1513,17 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
hl_debug_root);
+ debugfs_create_x64("memory_scrub_val",
+ 0644,
+ dev_entry->root,
+ &dev_entry->memory_scrub_val);
+
+ debugfs_create_file("memory_scrub",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_mem_scrub_fops);
+
debugfs_create_x64("addr",
0644,
dev_entry->root,
@@ -1470,7 +1624,18 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_blob("data_dma",
0400,
dev_entry->root,
- &dev_entry->blob_desc);
+ &dev_entry->data_dma_blob_desc);
+
+ debugfs_create_file("monitor_dump_trig",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_monitor_dump_fops);
+
+ debugfs_create_blob("monitor_dump",
+ 0400,
+ dev_entry->root,
+ &dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
@@ -1509,7 +1674,8 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
mutex_destroy(&entry->file_mutex);
- vfree(entry->blob_desc.data);
+ vfree(entry->data_dma_blob_desc.data);
+ vfree(entry->mon_dump_blob_desc.data);
for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
vfree(entry->state_dump[i]);
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index dc9341a64541..b4f14c6d3970 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -15,6 +15,182 @@
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
+/*
+ * hl_set_dram_bar - sets the bar to allow later access to an address
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address the caller wants to access.
+ *
+ * @return: the old BAR base address on success, U64_MAX for failure.
+ * The caller should set it back to the old address after use.
+ *
+ * In case the bar space does not cover the whole address space,
+ * the bar base address should be set to allow access to a given address.
+ * This function can also be called if the bar doesn't need to be set;
+ * in that case it just won't change the base.
+ */
+static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_base_addr;
+
+ bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+
+ return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
+}
+
+
+static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type, enum pci_region region_type)
+{
+ struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
+ u64 old_base, rc;
+
+ if (region_type == PCI_REGION_DRAM) {
+ old_base = hl_set_dram_bar(hdev, addr);
+ if (old_base == U64_MAX)
+ return -EIO;
+ }
+
+ switch (acc_type) {
+ case DEBUGFS_READ8:
+ *val = readb(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE8:
+ writeb(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_READ32:
+ *val = readl(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE32:
+ writel(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_READ64:
+ *val = readq(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE64:
+ writeq(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ }
+
+ if (region_type == PCI_REGION_DRAM) {
+ rc = hl_set_dram_bar(hdev, old_base);
+ if (rc == U64_MAX)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int rc, i;
+
+ rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+ if (rc)
+ return rc;
+
+ /* Shift to the device's base physical address of host memory if necessary */
+ if (prop->device_dma_offset_for_host_access)
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ sg->dma_address += prop->device_dma_offset_for_host_access;
+
+ return 0;
+}
+
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int i;
+
+ /* Cancel the device's base physical address of host memory if necessary */
+ if (prop->device_dma_offset_for_host_access)
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ sg->dma_address -= prop->device_dma_offset_for_host_access;
+
+ dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+}
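+
+/*
+ * Typical pairing (illustrative sketch only): map the scatter-gather table
+ * before handing its DMA addresses to the device, then unmap it with the
+ * same direction once the transfer completes. DMA_BIDIRECTIONAL is just an
+ * example direction here.
+ *
+ *	rc = hl_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+ *	if (rc)
+ *		return rc;
+ *	... program the DMA engine with the addresses in userptr->sgt ...
+ *	hl_dma_unmap_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+ */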
+
+/*
+ * hl_access_cfg_region - access the config region
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (read/write 64/32)
+ */
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
+ u32 val_h, val_l;
+
+ if (!IS_ALIGNED(addr, sizeof(u32))) {
+ dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
+ return -EINVAL;
+ }
+
+ switch (acc_type) {
+ case DEBUGFS_READ32:
+ *val = RREG32(addr - cfg_region->region_base);
+ break;
+ case DEBUGFS_WRITE32:
+ WREG32(addr - cfg_region->region_base, *val);
+ break;
+ case DEBUGFS_READ64:
+ val_l = RREG32(addr - cfg_region->region_base);
+ val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
+
+ *val = (((u64) val_h) << 32) | val_l;
+ break;
+ case DEBUGFS_WRITE64:
+ WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
+ WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
+ break;
+ default:
+ dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * hl_access_dev_mem - access device memory
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @region: the memory region the address belongs to
+ * @region_type: the type of the region the address belongs to
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (r/w, 32/64)
+ */
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type)
+{
+ switch (region_type) {
+ case PCI_REGION_CFG:
+ return hl_access_cfg_region(hdev, addr, val, acc_type);
+ case PCI_REGION_SRAM:
+ case PCI_REGION_DRAM:
+ return hl_access_sram_dram_region(hdev, addr, val, acc_type,
+ region_type);
+ default:
+ return -EFAULT;
+ }
+
+ return 0;
+}
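+
+/*
+ * Call-chain sketch (illustrative): the debugfs read/write handlers resolve
+ * the address via hl_access_mem() -> hl_access_dev_mem_by_region(), which
+ * invokes the ASIC's access_dev_mem callback; an ASIC may simply wire that
+ * callback to this helper.
+ */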
+
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
@@ -107,6 +283,14 @@ static void hpriv_release(struct kref *ref)
hdev->is_compute_ctx_active = false;
mutex_unlock(&hdev->fpriv_list_lock);
+ hdev->compute_ctx_in_release = 0;
+
+ /* release the eventfd */
+ if (hpriv->notifier_event.eventfd)
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+ mutex_destroy(&hpriv->notifier_event.lock);
+
kfree(hpriv);
}
@@ -146,10 +330,11 @@ static int hl_device_release(struct inode *inode, struct file *filp)
*/
hl_release_pending_user_interrupts(hpriv->hdev);
- hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
- hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+ hdev->compute_ctx_in_release = 1;
+
if (!hl_hpriv_put(hpriv))
dev_notice(hdev->dev,
"User process closed FD but device still in use\n");
@@ -176,6 +361,11 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
+ /* release the eventfd */
+ if (hpriv->notifier_event.eventfd)
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+ mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
@@ -204,17 +394,15 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
}
vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
- case HL_MMAP_TYPE_CB:
- return hl_cb_mmap(hpriv, vma);
-
case HL_MMAP_TYPE_BLOCK:
+ vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
return hl_hw_block_mmap(hpriv, vma);
+ case HL_MMAP_TYPE_CB:
case HL_MMAP_TYPE_TS_BUFF:
- return hl_ts_mmap(hpriv, vma);
+ return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
}
return -EINVAL;
@@ -424,18 +612,25 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
+ hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
+ if (!hdev->pf_wq) {
+ dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
+ rc = -ENOMEM;
+ goto free_ts_free_wq;
+ }
+
hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
- goto free_ts_free_wq;
+ goto free_pf_wq;
}
rc = hl_mmu_if_set_funcs(hdev);
if (rc)
goto free_chip_info;
- hl_cb_mgr_init(&hdev->kernel_cb_mgr);
+ hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
hdev->device_reset_work.wq =
create_singlethread_workqueue("hl_device_reset");
@@ -464,9 +659,11 @@ static int device_early_init(struct hl_device *hdev)
return 0;
free_cb_mgr:
- hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
+free_pf_wq:
+ destroy_workqueue(hdev->pf_wq);
free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq);
free_eq_wq:
@@ -503,10 +700,11 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
- hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
+ destroy_workqueue(hdev->pf_wq);
destroy_workqueue(hdev->ts_free_obj_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq);
@@ -703,6 +901,9 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev, skip_wq_flush);
+ /* flush the MMU prefetch workqueue */
+ flush_workqueue(hdev->pf_wq);
+
/* Release all pending user interrupts, each pending user interrupt
* holds a reference to user context
*/
@@ -847,10 +1048,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
put_task_struct(task);
} else {
- dev_warn(hdev->dev,
- "Can't get task struct for PID so giving up on killing process\n");
- mutex_unlock(fd_lock);
- return -ETIME;
+ /*
+ * If we got here, it means that process was killed from outside the driver
+ * right after it started looping on fd_list and before get_pid_task, thus
+ * we don't need to kill it.
+ */
+ dev_dbg(hdev->dev,
+ "Can't get task struct for user process, assuming process was killed from outside the driver\n");
}
}
@@ -1062,9 +1266,9 @@ do_reset:
if (hard_reset)
dev_info(hdev->dev, "Going to reset device\n");
else if (reset_upon_device_release)
- dev_info(hdev->dev, "Going to reset device after release by user\n");
+ dev_dbg(hdev->dev, "Going to reset device after release by user\n");
else
- dev_info(hdev->dev, "Going to reset engines of inference device\n");
+ dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
again:
@@ -1270,7 +1474,10 @@ kill_processes:
hdev->reset_info.needs_reset = false;
- dev_notice(hdev->dev, "Successfully finished resetting the device\n");
+ if (hard_reset)
+ dev_info(hdev->dev, "Successfully finished resetting the device\n");
+ else
+ dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
@@ -1323,6 +1530,43 @@ out_err:
return rc;
}
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+{
+ mutex_lock(&notifier_event->lock);
+ notifier_event->events_mask |= event;
+ if (notifier_event->eventfd)
+ eventfd_signal(notifier_event->eventfd, 1);
+
+ mutex_unlock(&notifier_event->lock);
+}
+
+/*
+ * hl_notifier_event_send_all - notify all user processes via eventfd
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @event: the event that occurred
+ */
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+{
+ struct hl_fpriv *hpriv;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+ hl_notifier_event_send(&hpriv->notifier_event, event);
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ /* control device */
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
+ hl_notifier_event_send(&hpriv->notifier_event, event);
+
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
+}
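+
+/*
+ * Note (illustrative): a process that registered an eventfd in its
+ * hl_notifier_event is signalled here; the bits accumulated in events_mask
+ * indicate which events occurred.
+ */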
+
/*
* hl_device_init - main initialization function for habanalabs device
*
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 3262126cc7ca..828a36af5b14 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -18,8 +18,9 @@
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
+ u32 ver_offset;
- fw_ver = kmalloc(16, GFP_KERNEL);
+ fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
if (!fw_ver)
return NULL;
@@ -29,9 +30,10 @@ static char *extract_fw_ver_from_str(const char *fw_str)
/* Skip the fw- part */
str += 3;
+ ver_offset = str - fw_str;
/* Copy until the next whitespace */
- whitespace = strnstr(str, " ", 15);
+ whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
if (!whitespace)
goto free_fw_ver;
@@ -819,6 +821,54 @@ out:
return rc;
}
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ struct cpucp_monitor_dump *mon_dump_cpu_addr;
+ dma_addr_t mon_dump_dma_addr;
+ struct cpucp_packet pkt = {};
+ size_t data_size;
+ __le32 *src_ptr;
+ u32 *dst_ptr;
+ u64 result;
+ int i, rc;
+
+ data_size = sizeof(struct cpucp_monitor_dump);
+ mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
+ &mon_dump_dma_addr);
+ if (!mon_dump_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
+ return -ENOMEM;
+ }
+
+ memset(mon_dump_cpu_addr, 0, data_size);
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(mon_dump_dma_addr);
+ pkt.data_max_size = cpu_to_le32(data_size);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
+ goto out;
+ }
+
+ /* result contains the actual size */
+ src_ptr = (__le32 *) mon_dump_cpu_addr;
+ dst_ptr = data;
+ for (i = 0; i < (data_size / sizeof(u32)); i++) {
+ *dst_ptr = le32_to_cpu(*src_ptr);
+ src_ptr++;
+ dst_ptr++;
+ }
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+
+ return rc;
+}
+
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters)
{
@@ -1539,7 +1589,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
status,
FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
- hdev->fw_poll_interval_usec,
+ hdev->fw_comms_poll_interval_usec,
timeout);
if (rc) {
@@ -1909,7 +1959,7 @@ static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
* @fwc: the firmware component
* @fw_version: fw component's version string
*/
-static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
+static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
enum hl_fw_component fwc,
const char *fw_version)
{
@@ -1933,23 +1983,33 @@ static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
VERSION_MAX_LEN);
if (preboot_ver && preboot_ver != prop->preboot_ver) {
strscpy(btl_ver, prop->preboot_ver,
- min((int) (preboot_ver - prop->preboot_ver),
- 31));
+ min((int) (preboot_ver - prop->preboot_ver), 31));
dev_info(hdev->dev, "%s\n", btl_ver);
}
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
- dev_info(hdev->dev, "preboot version %s\n",
- preboot_ver);
+ char major[8];
+ int rc;
+
+ dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
+ sprintf(major, "%.2s", preboot_ver);
kfree(preboot_ver);
+
+ rc = kstrtou32(major, 10, &hdev->fw_major_version);
+ if (rc) {
+ dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ return rc;
+ }
}
break;
default:
dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
- return;
+ return -EINVAL;
}
+
+ return 0;
}
/**
@@ -2121,9 +2181,10 @@ static int hl_fw_dynamic_load_image(struct hl_device *hdev,
goto release_fw;
/* read preboot version */
- hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
+ rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-
+ if (rc)
+ goto release_fw;
/* update state according to boot stage */
if (cur_fwc == FW_COMP_BOOT_FIT) {
@@ -2390,9 +2451,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
/* read preboot version */
- hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+ return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
- return 0;
}
/* load boot fit to FW */
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 1edaf6ab67bd..b0b0f3f89865 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -21,6 +21,7 @@
#include <linux/hashtable.h>
#include <linux/debugfs.h>
#include <linux/rwsem.h>
+#include <linux/eventfd.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -61,8 +62,10 @@
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */
#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
+#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC 1000000 /* 1s */
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
@@ -394,18 +397,8 @@ enum hl_device_hw_state {
* struct hl_mmu_properties - ASIC specific MMU address translation properties.
* @start_addr: virtual start address of the memory region.
* @end_addr: virtual end address of the memory region.
- * @hop0_shift: shift of hop 0 mask.
- * @hop1_shift: shift of hop 1 mask.
- * @hop2_shift: shift of hop 2 mask.
- * @hop3_shift: shift of hop 3 mask.
- * @hop4_shift: shift of hop 4 mask.
- * @hop5_shift: shift of hop 5 mask.
- * @hop0_mask: mask to get the PTE address in hop 0.
- * @hop1_mask: mask to get the PTE address in hop 1.
- * @hop2_mask: mask to get the PTE address in hop 2.
- * @hop3_mask: mask to get the PTE address in hop 3.
- * @hop4_mask: mask to get the PTE address in hop 4.
- * @hop5_mask: mask to get the PTE address in hop 5.
+ * @hop_shifts: array holds HOPs shifts.
+ * @hop_masks: array holds HOPs masks.
* @last_mask: mask to get the bit indicating this is the last hop.
* @pgt_size: size for page tables.
* @page_size: default page size used to allocate memory.
@@ -418,18 +411,8 @@ enum hl_device_hw_state {
struct hl_mmu_properties {
u64 start_addr;
u64 end_addr;
- u64 hop0_shift;
- u64 hop1_shift;
- u64 hop2_shift;
- u64 hop3_shift;
- u64 hop4_shift;
- u64 hop5_shift;
- u64 hop0_mask;
- u64 hop1_mask;
- u64 hop2_mask;
- u64 hop3_mask;
- u64 hop4_mask;
- u64 hop5_mask;
+ u64 hop_shifts[MMU_HOP_MAX];
+ u64 hop_masks[MMU_HOP_MAX];
u64 last_mask;
u64 pgt_size;
u32 page_size;
@@ -486,8 +469,10 @@ struct hl_hints_range {
* the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
- * device_dma_offset_for_host_access: the offset to add to host DMA addresses
- * to enable the device to access them.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ * to enable the device to access them.
+ * @host_base_address: host physical start address for host DMA from device
+ * @host_end_address: host physical end address for host DMA from device
* @max_freq_value: current max clk frequency.
* @clk_pll_index: clock PLL index that specify which PLL determines the clock
* we display to the user
@@ -528,6 +513,10 @@ struct hl_hints_range {
* @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
* status reported by FW, bit description can be
* found in CPU_BOOT_DEV_STS1
+ * @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
+ * which the property supports_user_set_page_size is true
+ * (i.e. the DRAM supports multiple page sizes), otherwise
+ * it will be equal to dram_page_size.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -568,6 +557,7 @@ struct hl_hints_range {
* @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
* @set_max_power_on_device_init: true if need to set max power in F/W on device init.
* @supports_user_set_page_size: true if user can set the allocation page size.
+ * @dma_mask: the dma mask to be set for this device
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -599,6 +589,8 @@ struct asic_fixed_properties {
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 device_dma_offset_for_host_access;
+ u64 host_base_address;
+ u64 host_end_address;
u64 max_freq_value;
u32 clk_pll_index;
u32 mmu_pgt_size;
@@ -626,6 +618,7 @@ struct asic_fixed_properties {
u32 fw_bootfit_cpu_boot_dev_sts1;
u32 fw_app_cpu_boot_dev_sts0;
u32 fw_app_cpu_boot_dev_sts1;
+ u32 device_mem_alloc_default_page_size;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -654,6 +647,7 @@ struct asic_fixed_properties {
u8 configurable_stop_on_err;
u8 set_max_power_on_device_init;
u8 supports_user_set_page_size;
+ u8 dma_mask;
};
/**
@@ -711,85 +705,102 @@ struct hl_cs_compl {
*/
/**
- * struct hl_cb_mgr - describes a Command Buffer Manager.
- * @cb_lock: protects cb_handles.
- * @cb_handles: an idr to hold all command buffer handles.
- */
-struct hl_cb_mgr {
- spinlock_t cb_lock;
- struct idr cb_handles; /* protected by cb_lock */
-};
-
-/**
- * struct hl_ts_mgr - describes the timestamp registration memory manager.
- * @ts_lock: protects ts_handles.
- * @ts_handles: an idr to hold all ts bufferes handles.
- */
-struct hl_ts_mgr {
- spinlock_t ts_lock;
- struct idr ts_handles;
-};
-
-/**
* struct hl_ts_buff - describes a timestamp buffer.
- * @refcount: reference counter for usage of the buffer.
- * @hdev: pointer to device this buffer belongs to.
- * @mmap: true if the buff is currently mapped to user.
* @kernel_buff_address: Holds the internal buffer's kernel virtual address.
* @user_buff_address: Holds the user buffer's kernel virtual address.
- * @id: the buffer ID.
- * @mmap_size: Holds the buffer size that was mmaped.
* @kernel_buff_size: Holds the internal kernel buffer size.
- * @user_buff_size: Holds the user buffer size.
*/
struct hl_ts_buff {
- struct kref refcount;
- struct hl_device *hdev;
- atomic_t mmap;
void *kernel_buff_address;
void *user_buff_address;
- u32 id;
- u32 mmap_size;
u32 kernel_buff_size;
- u32 user_buff_size;
+};
+
+struct hl_mmap_mem_buf;
+
+/**
+ * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks.
+ * @dev: back pointer to the owning device
+ * @lock: protects handles
+ * @handles: an idr holding all active handles to the memory buffers in the system.
+ */
+struct hl_mem_mgr {
+ struct device *dev;
+ spinlock_t lock;
+ struct idr handles;
+};
+
+/**
+ * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
+ * @topic: string identifier used for logging
+ * @mem_id: memory type identifier, embedded in the handle and used to identify
+ * the memory type by handle.
+ * @alloc: callback executed on buffer allocation, shall allocate the memory,
+ * store it under the buffer's private field, and set the mappable size.
+ * @mmap: callback executed on mmap, must map the buffer to vma
+ * @release: callback executed on release, must free the resources used by the buffer
+ */
+struct hl_mmap_mem_buf_behavior {
+ const char *topic;
+ u64 mem_id;
+
+ int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
+ int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
+ void (*release)(struct hl_mmap_mem_buf *buf);
+};
+
+/**
+ * struct hl_mmap_mem_buf - describes a single unified memory buffer
+ * @behavior: buffer behavior
+ * @mmg: back pointer to the unified memory manager
+ * @refcount: reference counter for buffer users
+ * @private: pointer to buffer behavior private data
+ * @mmap: atomic boolean indicating whether or not the buffer is mapped right now
+ * @real_mapped_size: the actual mapped size of the buffer; parts of it may be
+ * released later, so this may change at runtime.
+ * @mappable_size: the original mappable size of the buffer, does not change after
+ * the allocation.
+ * @handle: the buffer id in mmg handles store
+ */
+struct hl_mmap_mem_buf {
+ struct hl_mmap_mem_buf_behavior *behavior;
+ struct hl_mem_mgr *mmg;
+ struct kref refcount;
+ void *private;
+ atomic_t mmap;
+ u64 real_mapped_size;
+ u64 mappable_size;
+ u64 handle;
};
/**
* struct hl_cb - describes a Command Buffer.
- * @refcount: reference counter for usage of the CB.
* @hdev: pointer to device this CB belongs to.
* @ctx: pointer to the CB owner's context.
- * @lock: spinlock to protect mmap flows.
+ * @buf: back pointer to the parent mappable memory buffer
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
* @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
* the device's MMU.
- * @id: the CB's ID.
* @kernel_address: Holds the CB's kernel virtual address.
* @bus_address: Holds the CB's DMA address.
- * @mmap_size: Holds the CB's size that was mmaped.
* @size: holds the CB's size.
* @cs_cnt: holds number of CS that this CB participates in.
- * @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
* @is_internal: internaly allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
- struct kref refcount;
struct hl_device *hdev;
struct hl_ctx *ctx;
- spinlock_t lock;
+ struct hl_mmap_mem_buf *buf;
struct list_head debugfs_list;
struct list_head pool_list;
struct list_head va_block_list;
- u64 id;
void *kernel_address;
dma_addr_t bus_address;
- u32 mmap_size;
u32 size;
atomic_t cs_cnt;
- u8 mmap;
u8 is_pool;
u8 is_internal;
u8 is_mmu_mapped;
@@ -935,12 +946,12 @@ struct hl_user_interrupt {
* struct timestamp_reg_free_node - holds the timestamp registration free objects node
* @free_objects_node: node in the list free_obj_jobs
* @cq_cb: pointer to cq command buffer to be freed
- * @ts_buff: pointer to timestamp buffer to be freed
+ * @buf: pointer to timestamp buffer to be freed
*/
struct timestamp_reg_free_node {
struct list_head free_objects_node;
struct hl_cb *cq_cb;
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
};
/* struct timestamp_reg_work_obj - holds the timestamp registration free objects job
@@ -957,8 +968,8 @@ struct timestamp_reg_work_obj {
};
/* struct timestamp_reg_info - holds the timestamp registration related data.
- * @ts_buff: pointer to the timestamp buffer which include both user/kernel buffers.
- * relevant only when doing timestamps records registration.
+ * @buf: pointer to the timestamp buffer which includes both user/kernel buffers.
+ * relevant only when doing timestamp records registration.
* @cq_cb: pointer to CQ counter CB.
* @timestamp_kernel_addr: timestamp handle address, where to set timestamp
* relevant only when doing timestamps records
@@ -969,7 +980,7 @@ struct timestamp_reg_work_obj {
* allocating records dynamically.
*/
struct timestamp_reg_info {
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
u64 *timestamp_kernel_addr;
u8 in_use;
@@ -1068,6 +1079,15 @@ enum div_select_defs {
DIV_SEL_DIVIDED_PLL = 3,
};
+enum debugfs_access_type {
+ DEBUGFS_READ8,
+ DEBUGFS_WRITE8,
+ DEBUGFS_READ32,
+ DEBUGFS_WRITE32,
+ DEBUGFS_READ64,
+ DEBUGFS_WRITE64,
+};
+
enum pci_region {
PCI_REGION_CFG,
PCI_REGION_SRAM,
@@ -1229,6 +1249,7 @@ struct fw_load_mgr {
* its implementation is not trivial when the driver
* is loaded in simulation mode (not upstreamed).
* @scrub_device_mem: Scrub device memory given an address and size
+ * @scrub_device_dram: Scrub the dram memory of the device.
* @get_int_queue_base: get the internal queue base address.
* @test_queues: run simple test on all queues for sanity check.
* @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
@@ -1236,18 +1257,14 @@ struct fw_load_mgr {
* @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
- * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
* @cs_parser: parse Command Submission.
- * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @asic_dma_map_sgtable: DMA map scatter-gather table.
* @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
* @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
* @restore_phase_topology: clear all SOBs amd MONs.
- * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
- * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
- * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
- * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
* @debugfs_read_dma: debug interface for reading up to 2MB from the device's
* internal memory via DMA engine.
* @add_device_attr: add ASIC specific device attributes.
@@ -1257,8 +1274,8 @@ struct fw_load_mgr {
* @write_pte: write MMU page table entry to DRAM.
* @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
* (L1 only) or hard (L0 & L1) flush.
- * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
- * ASID-VA-size mask.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with ASID-VA-size mask.
+ * @mmu_prefetch_cache_range: pre-fetch specific MMU STLB cache lines with ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to CPU-CP and verify response.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
@@ -1267,6 +1284,7 @@ struct fw_load_mgr {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @get_monitor_dump: retrieve monitor registers dump from F/W.
* @send_cpu_message: send message to F/W. If the message is timedout, the
* driver will eventually reset the device. The timeout can
* be determined by the calling function or it can be 0 and
@@ -1289,8 +1307,6 @@ struct fw_load_mgr {
* @gen_wait_cb: Generate a wait CB.
* @reset_sob: Reset a SOB.
* @reset_sob_group: Reset SOB group
- * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
- * firmware configuration
* @get_device_time: Get the device time.
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
@@ -1319,6 +1335,9 @@ struct fw_load_mgr {
* @get_stream_master_qid_arr: get pointer to stream masters QID array
* @is_valid_dram_page_size: return true if page size is supported in device
* memory allocation, otherwise false.
+ * @get_valid_dram_page_orders: get valid device memory allocation page orders
+ * @access_dev_mem: access device memory
+ * @set_dram_bar_base: set the base of the DRAM BAR
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1342,6 +1361,7 @@ struct hl_asic_funcs {
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
+ int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
int (*test_queues)(struct hl_device *hdev);
@@ -1353,12 +1373,11 @@ struct hl_asic_funcs {
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
- void (*hl_dma_unmap_sg)(struct hl_device *hdev,
- struct scatterlist *sgl, int nents,
+ void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
+ struct sg_table *sgt,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
- int (*asic_dma_map_sg)(struct hl_device *hdev,
- struct scatterlist *sgl, int nents,
+ int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
struct sg_table *sgt);
@@ -1369,14 +1388,6 @@ struct hl_asic_funcs {
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
- int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val);
- int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val);
- int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val);
- int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val);
int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
void *blob_addr);
void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
@@ -1391,6 +1402,7 @@ struct hl_asic_funcs {
u32 flags);
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size);
+ int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
@@ -1399,8 +1411,8 @@ struct hl_asic_funcs {
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
u32 (*get_pci_id)(struct hl_device *hdev);
- int (*get_eeprom_data)(struct hl_device *hdev, void *data,
- size_t max_size);
+ int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
+ int (*get_monitor_dump)(struct hl_device *hdev, void *data);
int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
u16 len, u32 timeout, u64 *result);
int (*pci_bars_map)(struct hl_device *hdev);
@@ -1421,7 +1433,6 @@ struct hl_asic_funcs {
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
- void (*set_dma_mask_from_fw)(struct hl_device *hdev);
u64 (*get_device_time)(struct hl_device *hdev);
int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
@@ -1445,6 +1456,12 @@ struct hl_asic_funcs {
void (*set_pci_memory_regions)(struct hl_device *hdev);
u32* (*get_stream_master_qid_arr)(void);
bool (*is_valid_dram_page_size)(u32 page_size);
+ int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr);
+ void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
+ int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+ u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
};
@@ -1915,6 +1932,18 @@ struct hl_debug_params {
bool enable;
};
+/**
+ * struct hl_notifier_event - holds the notifier data structure
+ * @eventfd: the event file descriptor to raise the notifications
+ * @lock: mutex lock to protect the notifier data flows
+ * @events_mask: bitmap of the occurred events
+ */
+struct hl_notifier_event {
+ struct eventfd_ctx *eventfd;
+ struct mutex lock;
+ u64 events_mask;
+};
+
/*
* FILE PRIVATE STRUCTURE
*/
@@ -1926,25 +1955,25 @@ struct hl_debug_params {
* @taskpid: current process ID.
* @ctx: current executing context. TODO: remove for multiple ctx per process
* @ctx_mgr: context manager to handle multiple context for this FD.
- * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
- * @ts_mem_mgr: timestamp registration manager for alloc/free/map timestamp buffers.
+ * @mem_mgr: manager descriptor for memory exportable via mmap
+ * @notifier_event: notifier eventfd towards user process
* @debugfs_list: list of relevant ASIC debugfs.
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
*/
struct hl_fpriv {
- struct hl_device *hdev;
- struct file *filp;
- struct pid *taskpid;
- struct hl_ctx *ctx;
- struct hl_ctx_mgr ctx_mgr;
- struct hl_cb_mgr cb_mgr;
- struct hl_ts_mgr ts_mem_mgr;
- struct list_head debugfs_list;
- struct list_head dev_node;
- struct kref refcount;
- struct mutex restore_phase_mutex;
+ struct hl_device *hdev;
+ struct file *filp;
+ struct pid *taskpid;
+ struct hl_ctx *ctx;
+ struct hl_ctx_mgr ctx_mgr;
+ struct hl_mem_mgr mem_mgr;
+ struct hl_notifier_event notifier_event;
+ struct list_head debugfs_list;
+ struct list_head dev_node;
+ struct kref refcount;
+ struct mutex restore_phase_mutex;
};
@@ -1992,12 +2021,14 @@ struct hl_debugfs_entry {
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
- * @blob_desc: descriptor of blob
+ * @data_dma_blob_desc: data DMA descriptor of blob.
+ * @mon_dump_blob_desc: monitor dump descriptor of blob.
* @state_dump: data of the system states in case of a bad cs.
* @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
* @userptr_lookup: the target user ptr to look up for on demand.
+ * @memory_scrub_val: the value to which the dram will be scrubbed using cb scrub_device_dram
* @mmu_asid: ASID to use while translating in mmu_show.
* @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -2021,12 +2052,14 @@ struct hl_dbg_device_entry {
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
- struct debugfs_blob_wrapper blob_desc;
+ struct debugfs_blob_wrapper data_dma_blob_desc;
+ struct debugfs_blob_wrapper mon_dump_blob_desc;
char *state_dump[HL_STATE_DUMP_HIST_LEN];
struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
u64 userptr_lookup;
+ u64 memory_scrub_val;
u32 mmu_asid;
u32 state_dump_head;
u8 i2c_bus;
@@ -2442,6 +2475,24 @@ struct hl_mmu_funcs {
};
/**
+ * struct hl_prefetch_work - prefetch work structure handler
+ * @pf_work: actual work struct.
+ * @ctx: compute context.
+ * @va: virtual address to pre-fetch.
+ * @size: pre-fetch size.
+ * @flags: operation flags.
+ * @asid: ASID for maintenance operation.
+ */
+struct hl_prefetch_work {
+ struct work_struct pf_work;
+ struct hl_ctx *ctx;
+ u64 va;
+ u64 size;
+ u32 flags;
+ u32 asid;
+};
+
+/*
* number of user contexts allowed to call wait_for_multi_cs ioctl in
* parallel
*/
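hl_mmu_prefetch_cache_range() (declared further down) is the producer of these work items; its implementation is not shown in this excerpt. A minimal sketch of the expected flow, queueing the prefetch on the new pf_wq and running the ASIC callback in process context, might look as follows. The worker function name is hypothetical and context reference counting is omitted for brevity.

static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
	struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
	struct hl_ctx *ctx = pfw->ctx;

	/* ASIC-specific MMU cache prefetch; error handling is omitted in this sketch */
	ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
							pfw->va, pfw->size);

	kfree(pfw);
}

int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
	struct hl_prefetch_work *pfw;

	pfw = kmalloc(sizeof(*pfw), GFP_KERNEL);
	if (!pfw)
		return -ENOMEM;

	INIT_WORK(&pfw->pf_work, hl_mmu_prefetch_work_function);
	pfw->ctx = ctx;
	pfw->va = va;
	pfw->size = size;
	pfw->flags = flags;
	pfw->asid = asid;

	/* a real implementation would also take a reference on ctx here */
	queue_work(ctx->hdev->pf_wq, &pfw->pf_work);

	return 0;
}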
@@ -2517,37 +2568,50 @@ struct hl_clk_throttle {
};
/**
- * struct last_error_session_info - info about last session in which CS timeout or
- * razwi error occurred.
- * @open_dev_timestamp: device open timestamp.
- * @cs_timeout_timestamp: CS timeout timestamp.
- * @razwi_timestamp: razwi timestamp.
- * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the
- * first (root cause) CS timeout will not be overwritten.
- * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the
- * first (root cause) razwi will not be overwritten.
- * @cs_timeout_seq: CS timeout sequence number.
- * @razwi_addr: address that caused razwi.
- * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
- * not have engine id it will be set to U16_MAX.
- * @razwi_engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
- * engines which one them caused the razwi. In that case, it will contain the
- * second possible engine id, otherwise it will be set to U16_MAX.
- * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id.
- * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ * struct cs_timeout_info - info about the last CS timeout that occurred.
+ * @timestamp: CS timeout timestamp.
+ * @write_disable: if set, writing to CS parameters in the structure is disabled so
+ * the first (root cause) CS timeout will not be overwritten.
+ * @seq: CS timeout sequence number.
+ */
+struct cs_timeout_info {
+ ktime_t timestamp;
+ atomic_t write_disable;
+ u64 seq;
+};
+
+/**
+ * struct razwi_info - info about the last razwi error that occurred.
+ * @timestamp: razwi timestamp.
+ * @write_disable: if set, writing to razwi parameters in the structure is disabled so the
+ * first (root cause) razwi will not be overwritten.
+ * @addr: address that caused razwi.
+ * @engine_id_1: engine id of the razwi initiator. If it was initiated by an engine that
+ * does not have an engine id, it will be set to U16_MAX.
+ * @engine_id_2: second engine id of the razwi initiator. A razwi might have 2 possible
+ * engines, one of which caused it. In that case, this field will contain the
+ * second possible engine id, otherwise it will be set to U16_MAX.
+ * @non_engine_initiator: set in case the initiator of the razwi does not have an engine id.
+ * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ */
+struct razwi_info {
+ ktime_t timestamp;
+ atomic_t write_disable;
+ u64 addr;
+ u16 engine_id_1;
+ u16 engine_id_2;
+ u8 non_engine_initiator;
+ u8 type;
+};
+
+/**
+ * struct last_error_session_info - info about errors that occurred in the last session.
+ * @cs_timeout: CS timeout error last information.
+ * @razwi: razwi last information.
*/
struct last_error_session_info {
- ktime_t open_dev_timestamp;
- ktime_t cs_timeout_timestamp;
- ktime_t razwi_timestamp;
- atomic_t cs_write_disable;
- atomic_t razwi_write_disable;
- u64 cs_timeout_seq;
- u64 razwi_addr;
- u16 razwi_engine_id_1;
- u16 razwi_engine_id_2;
- u8 razwi_non_engine_initiator;
- u8 razwi_type;
+ struct cs_timeout_info cs_timeout;
+ struct razwi_info razwi;
};
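The split into cs_timeout_info and razwi_info keeps one write_disable latch per error type, so the first (root cause) event of a session survives later ones. The recording sites are outside this excerpt; a hypothetical capture helper illustrating the intended pattern:

/* Hypothetical capture site: only the first CS timeout after device open is recorded,
 * because hl_device_open() resets write_disable to 0 (see the change further below).
 */
static void capture_first_cs_timeout(struct hl_device *hdev, u64 seq)
{
	/* one-shot latch: only the first caller sees the old value 0 */
	if (atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1))
		return;

	hdev->last_error.cs_timeout.timestamp = ktime_get();
	hdev->last_error.cs_timeout.seq = seq;
}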
/**
@@ -2614,11 +2678,12 @@ struct hl_reset_info {
* context.
* @eq_wq: work queue of event queue for executing work in process context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
+ * @pf_wq: work queue for MMU pre-fetch operations.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
* @cs_mirror_lock: protects cs_mirror_list.
- * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @kernel_mem_mgr: memory manager for memory buffers with lifespan of driver.
* @event_queue: event queue for IRQ from CPU-CP.
* @dma_pool: DMA pool for small allocations.
* @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
@@ -2656,9 +2721,10 @@ struct hl_reset_info {
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
* @clk_throttling: holds information about current/previous clock throttling events
- * @reset_info: holds current device reset information.
* @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
+ * @fw_major_version: major version of the currently loaded preboot
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2678,6 +2744,9 @@ struct hl_reset_info {
* session.
* @open_counter: number of successful device open operations.
* @fw_poll_interval_usec: FW status poll interval in usec.
+ * used for CPU boot status
+ * @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
+ * used for COMMs protocol commands (COMMS_STS_*)
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
@@ -2686,6 +2755,7 @@ struct hl_reset_info {
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
* addresses.
+ * @is_in_dram_scrub: true if a dram scrub operation is ongoing.
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
@@ -2699,7 +2769,6 @@ struct hl_reset_info {
* huge pages.
* @init_done: is the initialization of the device done.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
- * @dma_mask: the dma mask that was set for this device
* @in_debug: whether the device is in a state where the profiling/tracing infrastructure
* can be used. This indication is needed because in some ASICs we need to do
* specific operations to enable that infrastructure.
@@ -2721,6 +2790,8 @@ struct hl_reset_info {
* cases where Linux was not loaded to device CPU
* @supports_wait_for_multi_cs: true if wait for multi CS is supported
* @is_compute_ctx_active: Whether there is an active compute context executing.
+ * @compute_ctx_in_release: true if the current compute context is being released.
+ * @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2742,11 +2813,12 @@ struct hl_device {
struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
struct workqueue_struct *ts_free_obj_wq;
+ struct workqueue_struct *pf_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
struct list_head cs_mirror_list;
spinlock_t cs_mirror_lock;
- struct hl_cb_mgr kernel_cb_mgr;
+ struct hl_mem_mgr kernel_mem_mgr;
struct hl_eq event_queue;
struct dma_pool *dma_pool;
void *cpu_accessible_dma_mem;
@@ -2797,6 +2869,7 @@ struct hl_device {
struct hl_reset_info reset_info;
u32 *stream_master_qid_arr;
+ u32 fw_major_version;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -2807,12 +2880,15 @@ struct hl_device {
u64 open_counter;
u64 fw_poll_interval_usec;
ktime_t last_successful_open_ktime;
+ u64 fw_comms_poll_interval_usec;
+
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
+ u8 is_in_dram_scrub;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -2823,7 +2899,6 @@ struct hl_device {
u8 pmmu_huge_range;
u8 init_done;
u8 device_cpu_disabled;
- u8 dma_mask;
u8 in_debug;
u8 cdev_sysfs_created;
u8 stop_on_err;
@@ -2839,6 +2914,8 @@ struct hl_device {
u8 supports_wait_for_multi_cs;
u8 stream_master_qid_arr_size;
u8 is_compute_ctx_active;
+ u8 compute_ctx_in_release;
+ u8 supports_mmu_prefetch;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2971,6 +3048,14 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
return ((address <= range_end_address) && (range_start_address <= end_address));
}
+uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir);
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type);
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
@@ -3013,7 +3098,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
-void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_ctx_get(struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
@@ -3034,23 +3119,21 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct cpucp_sensor *sensors_arr);
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
+
int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);
int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
bool map_cb, u64 *handle);
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle);
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 handle);
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle);
void hl_cb_put(struct hl_cb *cb);
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
@@ -3104,6 +3187,8 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool flush_pte);
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr);
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
bool flush_pte);
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
@@ -3112,6 +3197,7 @@ int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size);
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u8 hop_idx, u64 hop_addr, u64 virt_addr);
@@ -3149,6 +3235,7 @@ int hl_fw_cpucp_handshake(struct hl_device *hdev,
u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
u32 boot_err1_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
@@ -3224,11 +3311,19 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
const char *format, ...);
char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr);
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr);
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, u32 handle);
-void hl_ts_put(struct hl_ts_buff *buff);
+
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+ void *args);
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
+ u64 handle);
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle);
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf);
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+ void *args);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index ca404ed9d9a7..37edb69a7255 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -134,13 +134,14 @@ int hl_device_open(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
+
+ mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
- hl_cb_mgr_init(&hpriv->cb_mgr);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
- hl_ts_mgr_init(&hpriv->ts_mem_mgr);
+ hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
@@ -150,7 +151,28 @@ int hl_device_open(struct inode *inode, struct file *filp)
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
- rc = -EPERM;
+
+ if (status == HL_DEVICE_STATUS_IN_RESET)
+ rc = -EAGAIN;
+ else
+ rc = -EPERM;
+
+ goto out_err;
+ }
+
+ if (hdev->is_in_dram_scrub) {
+ dev_dbg_ratelimited(hdev->dev,
+ "Can't open %s during dram scrub\n",
+ dev_name(hdev->dev));
+ rc = -EAGAIN;
+ goto out_err;
+ }
+
+ if (hdev->compute_ctx_in_release) {
+ dev_dbg_ratelimited(hdev->dev,
+ "Can't open %s because another user is still releasing it\n",
+ dev_name(hdev->dev));
+ rc = -EAGAIN;
goto out_err;
}
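With this change a failed open no longer always means EPERM: a reset in progress, an ongoing DRAM scrub, or a compute context still being released all return EAGAIN, which userspace can treat as a transient condition. A minimal retry loop, with the device path and back-off chosen purely for illustration:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Illustrative only: retry transient EAGAIN failures, give up on anything else */
static int open_hl_device_retry(const char *path, int max_tries)
{
	int fd, i;

	for (i = 0; i < max_tries; i++) {
		fd = open(path, O_RDWR | O_CLOEXEC);
		if (fd >= 0 || errno != EAGAIN)
			return fd;
		usleep(100 * 1000); /* device busy (reset/scrub/release); back off and retry */
	}

	return -1;
}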
@@ -173,8 +195,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_write_disable, 0);
- atomic_set(&hdev->last_error.razwi_write_disable, 0);
+ atomic_set(&hdev->last_error.cs_timeout.write_disable, 0);
+ atomic_set(&hdev->last_error.razwi.write_disable, 0);
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -184,11 +206,11 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
- hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
- hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->restore_phase_mutex);
+ mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
@@ -222,9 +244,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
+
+ mutex_init(&hpriv->notifier_event.lock);
nonseekable_open(inode, filp);
- hpriv->taskpid = find_get_pid(current->pid);
+ hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_ctrl_list_lock);
@@ -288,6 +312,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+ hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
hdev->stop_on_err = true;
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
@@ -296,9 +321,6 @@ static int fixup_device_params(struct hl_device *hdev)
/* Enable only after the initialization of the device */
hdev->disabled = true;
- /* Set default DMA mask to 32 bits */
- hdev->dma_mask = 32;
-
return 0;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c13a3c2a7013..c7864d6bb0a1 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -76,6 +76,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.dram_page_size = prop->dram_page_size;
+ hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
@@ -115,6 +116,23 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate,
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
+static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ u32 max_size = args->return_size;
+ u64 events_mask;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((max_size < sizeof(u64)) || (!out))
+ return -EINVAL;
+
+ mutex_lock(&hpriv->notifier_event.lock);
+ events_mask = hpriv->notifier_event.events_mask;
+ hpriv->notifier_event.events_mask = 0;
+ mutex_unlock(&hpriv->notifier_event.lock);
+
+ return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
+}
+
static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
@@ -497,6 +515,8 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
open_stats_info.last_open_period_ms = jiffies64_to_msecs(
hdev->last_open_session_duration_jif);
open_stats_info.open_counter = hdev->open_counter;
+ open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
+ open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
return copy_to_user(out, &open_stats_info,
min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
@@ -549,7 +569,7 @@ static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *a
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp);
+ info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -564,8 +584,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.seq = hdev->last_error.cs_timeout_seq;
- info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp);
+ info.seq = hdev->last_error.cs_timeout.seq;
+ info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -580,16 +600,74 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp);
- info.addr = hdev->last_error.razwi_addr;
- info.engine_id_1 = hdev->last_error.razwi_engine_id_1;
- info.engine_id_2 = hdev->last_error.razwi_engine_id_2;
- info.no_engine_id = hdev->last_error.razwi_non_engine_initiator;
- info.error_type = hdev->last_error.razwi_type;
+ info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
+ info.addr = hdev->last_error.razwi.addr;
+ info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
+ info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
+ info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
+ info.error_type = hdev->last_error.razwi.type;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_info_dev_memalloc_page_sizes info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ /*
+ * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2"
+ * pages (unlike some earlier ASICs that support multiple page sizes).
+ * For this reason, for all ASICs that do not support multiple page sizes the function
+ * will return an empty bitmask, indicating that multiple page sizes are not supported.
+ */
+ hdev->asic_funcs->get_valid_dram_page_orders(&info);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ int rc;
+
+ /* check if there is already an eventfd registered for that process */
+ mutex_lock(&hpriv->notifier_event.lock);
+ if (hpriv->notifier_event.eventfd) {
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return -EINVAL;
+ }
+
+ hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
+ if (IS_ERR(hpriv->notifier_event.eventfd)) {
+ rc = PTR_ERR(hpriv->notifier_event.eventfd);
+ hpriv->notifier_event.eventfd = NULL;
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return rc;
+ }
+
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return 0;
+}
+
+static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ mutex_lock(&hpriv->notifier_event.lock);
+ if (!hpriv->notifier_event.eventfd) {
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return -EINVAL;
+ }
+
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+ hpriv->notifier_event.eventfd = NULL;
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return 0;
+}
+
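Putting eventfd_register(), eventfd_unregister() and events_info() together, the intended userspace flow is: create an eventfd, register it through the INFO ioctl, wait for the driver to signal it, then fetch and clear the 64-bit events mask. A rough sketch, under the assumption that HL_IOCTL_INFO and struct hl_info_args (with the op, eventfd, return_pointer and return_size fields used here) come from the installed habanalabs uapi header:

#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>	/* assumed install path of the uapi header */

/* Illustrative only: block until the driver raises an event, then read the mask */
static int wait_for_device_events(int dev_fd, uint64_t *events_mask)
{
	struct hl_info_args args;
	struct pollfd pfd;
	uint64_t efd_val;
	int efd, rc;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	rc = ioctl(dev_fd, HL_IOCTL_INFO, &args);
	if (rc)
		goto out;

	pfd.fd = efd;
	pfd.events = POLLIN;
	rc = poll(&pfd, 1, -1);			/* the driver signals the eventfd on an event */
	if (rc < 0)
		goto out;
	read(efd, &efd_val, sizeof(efd_val));	/* drain the eventfd counter */

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_GET_EVENTS;		/* returns and clears the pending mask */
	args.return_pointer = (uint64_t)(uintptr_t)events_mask;
	args.return_size = sizeof(*events_mask);
	rc = ioctl(dev_fd, HL_IOCTL_INFO, &args);

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_UNREGISTER_EVENTFD;
	ioctl(dev_fd, HL_IOCTL_INFO, &args);
out:
	close(efd);
	return rc;
}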
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -640,6 +718,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RAZWI_EVENT:
return razwi_info(hpriv, args);
+ case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
+ return dev_mem_alloc_page_sizes_info(hpriv, args);
+
+ case HL_INFO_GET_EVENTS:
+ return events_info(hpriv, args);
+
default:
break;
}
@@ -690,6 +774,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DRAM_PENDING_ROWS:
return dram_pending_rows_info(hpriv, args);
+ case HL_INFO_REGISTER_EVENTFD:
+ return eventfd_register(hpriv, args);
+
+ case HL_INFO_UNREGISTER_EVENTFD:
+ return eventfd_unregister(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index e2bc128f2291..8500e15ef743 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -152,11 +152,11 @@ static void hl_ts_free_objects(struct work_struct *work)
struct hl_device *hdev = job->hdev;
list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
- dev_dbg(hdev->dev, "About to put refcount to ts_buff (%p) cq_cb(%p)\n",
- free_obj->ts_buff,
+ dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
+ free_obj->buf,
free_obj->cq_cb);
- hl_ts_put(free_obj->ts_buff);
+ hl_mmap_mem_buf_put(free_obj->buf);
hl_cb_put(free_obj->cq_cb);
kfree(free_obj);
}
@@ -210,7 +210,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
/* Putting the refcount for ts_buff and cq_cb objects will be handled
* in workqueue context, just add job to free_list.
*/
- free_node->ts_buff = pend->ts_reg_info.ts_buff;
+ free_node->buf = pend->ts_reg_info.buf;
free_node->cq_cb = pend->ts_reg_info.cq_cb;
list_add(&free_node->free_objects_node, *free_list);
@@ -244,7 +244,7 @@ static void handle_user_cq(struct hl_device *hdev,
list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
- if (pend->ts_reg_info.ts_buff) {
+ if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
&ts_reg_free_list_head);
@@ -282,10 +282,6 @@ irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
struct hl_user_interrupt *user_cq = arg;
struct hl_device *hdev = user_cq->hdev;
- dev_dbg(hdev->dev,
- "got user completion interrupt id %u",
- user_cq->interrupt_id);
-
/* Handle user cq interrupts registered on all interrupts */
handle_user_cq(hdev, &hdev->common_user_interrupt);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index a13506dd8119..663dd7e589d4 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -41,7 +41,7 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u
return -EINVAL;
}
} else {
- psize = hdev->asic_prop.dram_page_size;
+ psize = prop->device_mem_alloc_default_page_size;
}
*page_size = psize;
@@ -117,7 +117,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %llu contiguous pages with total size of %llu\n",
+ "Cannot allocate %llu contiguous pages with total size of %llu\n",
num_pgs, total_size);
return -ENOMEM;
}
@@ -156,9 +156,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
else
phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
page_size);
+
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
- "Failed to allocate device memory (out of memory)\n");
+ "Cannot allocate device memory (out of memory)\n");
rc = -ENOMEM;
goto page_err;
}
@@ -237,19 +238,18 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
goto pin_err;
}
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, DMA_BIDIRECTIONAL);
- if (rc) {
- dev_err(hdev->dev, "failed to map sgt with DMA region\n");
- goto dma_map_err;
- }
-
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
userptr->vm_type = VM_TYPE_USERPTR;
*p_userptr = userptr;
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto dma_map_err;
+ }
+
return 0;
dma_map_err:
@@ -900,7 +900,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
* consecutive block.
*/
total_npages = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
@@ -929,7 +929,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->total_size = total_npages * page_size;
j = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
@@ -1102,21 +1102,24 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
* map a device virtual block to this pages and return the start address of
* this block.
*/
-static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
- u64 *device_addr)
+static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
{
- struct hl_device *hdev = ctx->hdev;
- struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ enum hl_va_range_type va_range_type = 0;
+ struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
+ u32 handle = 0, va_block_align;
struct hl_vm_hash_node *hnode;
+ struct hl_vm *vm = &hdev->vm;
struct hl_va_range *va_range;
- enum vm_type *vm_type;
+ bool is_userptr, do_prefetch;
u64 ret_vaddr, hint_addr;
- u32 handle = 0, va_block_align;
+ enum vm_type *vm_type;
int rc;
- bool is_userptr = args->flags & HL_MEM_USERPTR;
- enum hl_va_range_type va_range_type = 0;
+
+ /* set map flags */
+ is_userptr = args->flags & HL_MEM_USERPTR;
+ do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
/* Assume failure */
*device_addr = 0;
@@ -1241,19 +1244,27 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
- mutex_unlock(&ctx->mmu_lock);
- dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
- handle);
+ dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ if (rc)
+ goto map_err;
mutex_unlock(&ctx->mmu_lock);
- if (rc)
- goto map_err;
+ /*
+ * Prefetch is done upon the user's request. It is performed in a WQ and so can
+ * run outside the MMU lock. The operation itself is already protected by the MMU lock.
+ */
+ if (do_prefetch) {
+ rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
+ phys_pg_pack->total_size);
+ if (rc)
+ goto map_err;
+ }
ret_vaddr += phys_pg_pack->offset;
@@ -1272,6 +1283,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
return rc;
map_err:
+ mutex_unlock(&ctx->mmu_lock);
+
if (add_va_block(hdev, va_range, ret_vaddr,
ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
@@ -1509,7 +1522,7 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_ops = &hw_block_vm_ops;
vma->vm_private_data = lnode;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
@@ -1819,7 +1832,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
}
hl_dmabuf->ctx = ctx;
- hl_ctx_get(hdev, hl_dmabuf->ctx);
+ hl_ctx_get(hl_dmabuf->ctx);
*dmabuf_fd = fd;
@@ -2076,164 +2089,34 @@ out:
return rc;
}
-static void ts_buff_release(struct kref *ref)
-{
- struct hl_ts_buff *buff;
-
- buff = container_of(ref, struct hl_ts_buff, refcount);
-
- vfree(buff->kernel_buff_address);
- vfree(buff->user_buff_address);
- kfree(buff);
-}
-
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr,
- u32 handle)
-{
- struct hl_ts_buff *buff;
-
- spin_lock(&mgr->ts_lock);
- buff = idr_find(&mgr->ts_handles, handle);
- if (!buff) {
- spin_unlock(&mgr->ts_lock);
- dev_warn(hdev->dev,
- "TS buff get failed, no match to handle 0x%x\n", handle);
- return NULL;
- }
- kref_get(&buff->refcount);
- spin_unlock(&mgr->ts_lock);
-
- return buff;
-}
-
-void hl_ts_put(struct hl_ts_buff *buff)
+static void ts_buff_release(struct hl_mmap_mem_buf *buf)
{
- kref_put(&buff->refcount, ts_buff_release);
-}
-
-static void buff_vm_close(struct vm_area_struct *vma)
-{
- struct hl_ts_buff *buff = (struct hl_ts_buff *) vma->vm_private_data;
- long new_mmap_size;
-
- new_mmap_size = buff->mmap_size - (vma->vm_end - vma->vm_start);
+ struct hl_ts_buff *ts_buff = buf->private;
- if (new_mmap_size > 0) {
- buff->mmap_size = new_mmap_size;
- return;
- }
-
- atomic_set(&buff->mmap, 0);
- hl_ts_put(buff);
- vma->vm_private_data = NULL;
+ vfree(ts_buff->kernel_buff_address);
+ vfree(ts_buff->user_buff_address);
+ kfree(ts_buff);
}
-static const struct vm_operations_struct ts_buff_vm_ops = {
- .close = buff_vm_close
-};
-
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ts_buff *buff;
- u32 handle, user_buff_size;
- int rc;
-
- /* We use the page offset to hold the idr and thus we need to clear
- * it before doing the mmap itself
- */
- handle = vma->vm_pgoff;
- vma->vm_pgoff = 0;
-
- buff = hl_ts_get(hdev, &hpriv->ts_mem_mgr, handle);
- if (!buff) {
- dev_err(hdev->dev,
- "TS buff mmap failed, no match to handle 0x%x\n", handle);
- return -EINVAL;
- }
-
- /* Validation check */
- user_buff_size = vma->vm_end - vma->vm_start;
- if (user_buff_size != ALIGN(buff->user_buff_size, PAGE_SIZE)) {
- dev_err(hdev->dev,
- "TS buff mmap failed, mmap size 0x%x != 0x%x buff size\n",
- user_buff_size, ALIGN(buff->user_buff_size, PAGE_SIZE));
- rc = -EINVAL;
- goto put_buff;
- }
-
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE,
- (void __user *) (uintptr_t) vma->vm_start, user_buff_size)) {
-#else
- if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
- user_buff_size)) {
-#endif
- dev_err(hdev->dev,
- "user pointer is invalid - 0x%lx\n",
- vma->vm_start);
-
- rc = -EINVAL;
- goto put_buff;
- }
+ struct hl_ts_buff *ts_buff = buf->private;
- if (atomic_cmpxchg(&buff->mmap, 0, 1)) {
- dev_err(hdev->dev, "TS buff memory mmap failed, already mmaped to user\n");
- rc = -EINVAL;
- goto put_buff;
- }
-
- vma->vm_ops = &ts_buff_vm_ops;
- vma->vm_private_data = buff;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
- rc = remap_vmalloc_range(vma, buff->user_buff_address, 0);
- if (rc) {
- atomic_set(&buff->mmap, 0);
- goto put_buff;
- }
-
- buff->mmap_size = buff->user_buff_size;
- vma->vm_pgoff = handle;
-
- return 0;
-
-put_buff:
- hl_ts_put(buff);
- return rc;
-}
-
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr)
-{
- spin_lock_init(&mgr->ts_lock);
- idr_init(&mgr->ts_handles);
+ return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr)
-{
- struct hl_ts_buff *buff;
- struct idr *idp;
- u32 id;
-
- idp = &mgr->ts_handles;
-
- idr_for_each_entry(idp, buff, id) {
- if (kref_put(&buff->refcount, ts_buff_release) != 1)
- dev_err(hdev->dev, "TS buff handle %d for CTX is still alive\n",
- id);
- }
-
- idr_destroy(&mgr->ts_handles);
-}
-
-static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_elements)
+static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_ts_buff *ts_buff = NULL;
- u32 size;
+ u32 size, num_elements;
void *p;
+ num_elements = *(u32 *)args;
+
ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
if (!ts_buff)
- return NULL;
+ return -ENOMEM;
/* Allocate the user buffer */
size = num_elements * sizeof(u64);
@@ -2242,7 +2125,7 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
goto free_mem;
ts_buff->user_buff_address = p;
- ts_buff->user_buff_size = size;
+ buf->mappable_size = size;
/* Allocate the internal kernel buffer */
size = num_elements * sizeof(struct hl_user_pending_interrupt);
@@ -2253,15 +2136,25 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
ts_buff->kernel_buff_address = p;
ts_buff->kernel_buff_size = size;
- return ts_buff;
+ buf->private = ts_buff;
+
+ return 0;
free_user_buff:
vfree(ts_buff->user_buff_address);
free_mem:
kfree(ts_buff);
- return NULL;
+ return -ENOMEM;
}
+static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
+ .topic = "TS",
+ .mem_id = HL_MMAP_TYPE_TS_BUFF,
+ .mmap = hl_ts_mmap,
+ .alloc = hl_ts_alloc_buf,
+ .release = ts_buff_release,
+};
+
/**
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function will allocate ts buffer that will later on be mapped to the user
@@ -2278,54 +2171,22 @@ free_mem:
*/
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
{
- struct hl_ts_mgr *ts_mgr = &hpriv->ts_mem_mgr;
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ts_buff *ts_buff;
- int rc = 0;
+ struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
+ struct hl_mmap_mem_buf *buf;
if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
- dev_err(hdev->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
+ dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
args->num_of_elements, TS_MAX_ELEMENTS_NUM);
return -EINVAL;
}
- /* Allocate ts buffer object
- * This object will contain two buffers one that will be mapped to the user
- * and another internal buffer for the driver use only, which won't be mapped
- * to the user.
- */
- ts_buff = hl_ts_alloc_buff(hdev, args->num_of_elements);
- if (!ts_buff) {
- rc = -ENOMEM;
- goto out_err;
- }
-
- spin_lock(&ts_mgr->ts_lock);
- rc = idr_alloc(&ts_mgr->ts_handles, ts_buff, 1, 0, GFP_ATOMIC);
- spin_unlock(&ts_mgr->ts_lock);
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new ts buffer\n");
- goto release_ts_buff;
- }
-
- ts_buff->id = rc;
- ts_buff->hdev = hdev;
-
- kref_init(&ts_buff->refcount);
-
- /* idr is 32-bit so we can safely OR it with a mask that is above 32 bit */
- *handle = (u64) ts_buff->id | HL_MMAP_TYPE_TS_BUFF;
- *handle <<= PAGE_SHIFT;
+ buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
+ if (!buf)
+ return -ENOMEM;
- dev_dbg(hdev->dev, "Created ts buff object handle(%u)\n", ts_buff->id);
+ *handle = buf->handle;
return 0;
-
-release_ts_buff:
- kref_put(&ts_buff->refcount, ts_buff_release);
-out_err:
- *handle = 0;
- return rc;
}
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2587,9 +2448,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
kvfree(userptr->pages);
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
new file mode 100644
index 000000000000..ea5f2bd31b0a
--- /dev/null
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+/**
+ * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
+ * the buffer descriptor.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Find the buffer in the store and return a pointer to its descriptor.
+ * Increase buffer refcount. If not found - return NULL.
+ */
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
+{
+ struct hl_mmap_mem_buf *buf;
+
+ spin_lock(&mmg->lock);
+ buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+ if (!buf) {
+ spin_unlock(&mmg->lock);
+ dev_warn(mmg->dev,
+ "Buff get failed, no match to handle %#llx\n", handle);
+ return NULL;
+ }
+ kref_get(&buf->refcount);
+ spin_unlock(&mmg->lock);
+ return buf;
+}
+
+/**
+ * hl_mmap_mem_buf_destroy - destroy the unused buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Internal function, used as a final step of buffer release. Shall be invoked
+ * only when the buffer is no longer in use (removed from idr). Will call the
+ * release callback (if applicable), and free the memory.
+ */
+static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
+{
+ if (buf->behavior->release)
+ buf->behavior->release(buf);
+
+ kfree(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_release - release buffer
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used as a kref release callback, when the last user of
+ * the buffer is released. Shall be called from an interrupt context.
+ */
+static void hl_mmap_mem_buf_release(struct kref *kref)
+{
+ struct hl_mmap_mem_buf *buf =
+ container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+ spin_lock(&buf->mmg->lock);
+ idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+ spin_unlock(&buf->mmg->lock);
+
+ hl_mmap_mem_buf_destroy(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used for kref put by handle. Assumes mmg lock is taken.
+ * Will remove the buffer from idr, without destroying it.
+ */
+static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
+{
+ struct hl_mmap_mem_buf *buf =
+ container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+ idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+}
+
+/**
+ * hl_mmap_mem_buf_put - decrease the reference to the buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall be called from an interrupt context.
+ */
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
+{
+ return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
+}
+
+/**
+ * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
+ * given handle.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall not be called from an interrupt context. Return -EINVAL if handle was
+ * not found, else return the put outcome (0 or 1).
+ */
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
+{
+ struct hl_mmap_mem_buf *buf;
+
+ spin_lock(&mmg->lock);
+ buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+ if (!buf) {
+ spin_unlock(&mmg->lock);
+ dev_dbg(mmg->dev,
+ "Buff put failed, no match to handle %#llx\n", handle);
+ return -EINVAL;
+ }
+
+ if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
+ spin_unlock(&mmg->lock);
+ hl_mmap_mem_buf_destroy(buf);
+ return 1;
+ }
+
+ spin_unlock(&mmg->lock);
+ return 0;
+}
+
+/**
+ * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
+ *
+ * @mmg: parent unified memory manager
+ * @behavior: behavior object describing this buffer polymorphic behavior
+ * @gfp: gfp flags to use for the memory allocations
+ * @args: additional args passed to behavior->alloc
+ *
+ * Allocate and register a new memory buffer inside the given memory manager.
+ * Return the pointer to the new buffer on success or NULL on failure.
+ */
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+ void *args)
+{
+ struct hl_mmap_mem_buf *buf;
+ int rc;
+
+ buf = kzalloc(sizeof(*buf), gfp);
+ if (!buf)
+ return NULL;
+
+ spin_lock(&mmg->lock);
+ rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mmg->lock);
+ if (rc < 0) {
+ dev_err(mmg->dev,
+ "%s: Failed to allocate IDR for a new buffer, rc=%d\n",
+ behavior->topic, rc);
+ goto free_buf;
+ }
+
+ buf->mmg = mmg;
+ buf->behavior = behavior;
+ buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
+ kref_init(&buf->refcount);
+
+ rc = buf->behavior->alloc(buf, gfp, args);
+ if (rc) {
+ dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
+ behavior->topic, rc);
+ goto remove_idr;
+ }
+
+ return buf;
+
+remove_idr:
+ spin_lock(&mmg->lock);
+ idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+ spin_unlock(&mmg->lock);
+free_buf:
+ kfree(buf);
+ return NULL;
+}
+
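A consumer of this API supplies an hl_mmap_mem_buf_behavior and pairs hl_mmap_mem_buf_alloc() with hl_mmap_mem_buf_put() or hl_mmap_mem_buf_put_handle(). The timestamp buffers converted later in this patch are the real user; the fragment below is only a stripped-down, hypothetical illustration of the same pattern (HL_MMAP_TYPE_TS_BUFF is reused solely because no other mem_id is visible in this excerpt):

/* Hypothetical behavior: one zeroed page owned by the unified memory manager */
static int demo_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	buf->private = (void *)get_zeroed_page(gfp);
	if (!buf->private)
		return -ENOMEM;

	buf->mappable_size = PAGE_SIZE;
	return 0;
}

static void demo_release(struct hl_mmap_mem_buf *buf)
{
	free_page((unsigned long)buf->private);
}

static struct hl_mmap_mem_buf_behavior demo_behavior = {
	.topic = "DEMO",
	.mem_id = HL_MMAP_TYPE_TS_BUFF,	/* assumption, see note above */
	.alloc = demo_alloc,
	.release = demo_release,
};

/* allocate a buffer and hand its handle to the caller; released later with
 * hl_mmap_mem_buf_put_handle(mmg, handle)
 */
static u64 demo_create(struct hl_mem_mgr *mmg)
{
	struct hl_mmap_mem_buf *buf;

	buf = hl_mmap_mem_buf_alloc(mmg, &demo_behavior, GFP_KERNEL, NULL);

	return buf ? buf->handle : 0;
}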
+/**
+ * hl_mmap_mem_buf_vm_close - handle mmap close
+ *
+ * @vma: the vma object for which mmap was closed.
+ *
+ * Put the memory buffer if it is no longer mapped.
+ */
+static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_mmap_mem_buf *buf =
+ (struct hl_mmap_mem_buf *)vma->vm_private_data;
+ long new_mmap_size;
+
+ new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
+
+ if (new_mmap_size > 0) {
+ buf->real_mapped_size = new_mmap_size;
+ return;
+ }
+
+ atomic_set(&buf->mmap, 0);
+ hl_mmap_mem_buf_put(buf);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
+ .close = hl_mmap_mem_buf_vm_close
+};
+
+/**
+ * hl_mem_mgr_mmap - map the given buffer to the user
+ *
+ * @mmg: unified memory manager
+ * @vma: the vma object for which mmap was requested.
+ * @args: additional args passed to behavior->mmap
+ *
+ * Map the buffer specified by the vma->vm_pgoff to the given vma.
+ */
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+ void *args)
+{
+ struct hl_mmap_mem_buf *buf;
+ u64 user_mem_size;
+ u64 handle;
+ int rc;
+
+ /* We use the page offset to hold the idr and thus we need to clear
+ * it before doing the mmap itself
+ */
+ handle = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_pgoff = 0;
+
+ /* Reference was taken here */
+ buf = hl_mmap_mem_buf_get(mmg, handle);
+ if (!buf) {
+ dev_err(mmg->dev,
+ "Memory mmap failed, no match to handle %#llx\n", handle);
+ return -EINVAL;
+ }
+
+ /* Validation check */
+ user_mem_size = vma->vm_end - vma->vm_start;
+ if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
+ dev_err(mmg->dev,
+ "%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
+ buf->behavior->topic, user_mem_size, buf->mappable_size);
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
+ if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
+ user_mem_size)) {
+#else
+ if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
+ user_mem_size)) {
+#endif
+ dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
+ buf->behavior->topic, vma->vm_start);
+
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+ if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
+ dev_err(mmg->dev,
+ "%s, Memory mmap failed, already mmaped to user\n",
+ buf->behavior->topic);
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+ vma->vm_ops = &hl_mmap_mem_buf_vm_ops;
+
+ /* Note: We're transferring the memory reference to vma->vm_private_data here. */
+
+ vma->vm_private_data = buf;
+
+ rc = buf->behavior->mmap(buf, vma, args);
+ if (rc) {
+ atomic_set(&buf->mmap, 0);
+ goto put_mem;
+ }
+
+ buf->real_mapped_size = buf->mappable_size;
+ vma->vm_pgoff = handle >> PAGE_SHIFT;
+
+ return 0;
+
+put_mem:
+ hl_mmap_mem_buf_put(buf);
+ return rc;
+}
+
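hl_mem_mgr_mmap() expects the buffer handle (as produced by hl_mmap_mem_buf_alloc()) to arrive through vma->vm_pgoff. The driver's top-level .mmap handler is not part of this excerpt; the sketch below shows how it could delegate, with the HL_MMAP_TYPE_TS_BUFF test being only an assumed way of demultiplexing vm_pgoff:

/* Sketch of a file_operations .mmap handler delegating timestamp-buffer
 * mappings to the unified memory manager; the dispatch condition is assumed.
 */
static int hl_demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct hl_fpriv *hpriv = filp->private_data;

	if (vma->vm_pgoff & HL_MMAP_TYPE_TS_BUFF)
		return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);

	return -EINVAL;
}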
+/**
+ * hl_mem_mgr_init - initialize unified memory manager
+ *
+ * @dev: owner device pointer
+ * @mmg: structure to initialize
+ *
+ * Initialize an instance of unified memory manager
+ */
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
+{
+ mmg->dev = dev;
+ spin_lock_init(&mmg->lock);
+ idr_init(&mmg->handles);
+}
+
+/**
+ * hl_mem_mgr_fini - release unified memory manager
+ *
+ * @mmg: parent unified memory manager
+ *
+ * Release the unified memory manager. Shall be called from an interrupt context.
+ */
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
+{
+ struct hl_mmap_mem_buf *buf;
+ struct idr *idp;
+ const char *topic;
+ u32 id;
+
+ idp = &mmg->handles;
+
+ idr_for_each_entry(idp, buf, id) {
+ topic = buf->behavior->topic;
+ if (hl_mmap_mem_buf_put(buf) != 1)
+ dev_err(mmg->dev,
+ "%s: Buff handle %u for CTX is still alive\n",
+ topic, id);
+ }
+
+ /* TODO: can it happen that some buffer is still in use at this point? */
+
+ idr_destroy(&mmg->handles);
+}
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 810b73421ce1..f3734718d94f 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -9,6 +9,20 @@
#include "../habanalabs.h"
+/**
+ * hl_mmu_get_funcs() - get MMU functions structure
+ * @hdev: habanalabs device structure.
+ * @pgt_residency: page table residency.
+ * @is_dram_addr: true if we need HMMU functions
+ *
+ * @return appropriate MMU functions structure
+ */
+static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
+ bool is_dram_addr)
+{
+ return &hdev->mmu_func[pgt_residency];
+}
+
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -122,6 +136,53 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
}
/*
+ * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
+ *
+ * @hdev: pointer to device data.
+ * @mmu_prop: MMU properties.
+ * @page_size: page size
+ * @real_page_size: set here the actual page size to use for the operation
+ * @is_dram_addr: true if DRAM address, otherwise false.
+ *
+ * @return 0 on success, otherwise a non-zero error code
+ *
+ * Note that this is a general implementation that can fit most MMU architectures. But as it is
+ * used as an MMU function:
+ * 1. it shall not be called directly - only through an mmu_func structure instance
+ * 2. each MMU may modify the implementation internally
+ */
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr)
+{
+ /*
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it to sub-pages and map them separately.
+ */
+ if ((page_size % mmu_prop->page_size) == 0) {
+ *real_page_size = mmu_prop->page_size;
+ return 0;
+ }
+
+ dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+ page_size, mmu_prop->page_size >> 10);
+
+ return -EFAULT;
+}
+
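As a worked example of the general rule above: with an MMU page size of 4 KB, a 2 MB map request passes the divisibility check and is later split by the caller into 2 MB / 4 KB = 512 sub-page operations, while a 6 KB request is rejected with -EFAULT because it is not a multiple of 4 KB.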
+static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
+ bool is_dram_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (is_dram_addr)
+ return &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ return &prop->pmmu_huge;
+
+ return &prop->pmmu;
+}
+
+/*
* hl_mmu_unmap_page - unmaps a virtual addr
*
* @ctx: pointer to the context structure
@@ -142,60 +203,35 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
* For optimization reasons PCI flush may be requested once after unmapping of
* large area.
*/
-int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
- bool flush_pte)
+int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 real_virt_addr;
+ struct hl_mmu_funcs *mmu_funcs;
+ int i, pgt_residency, rc = 0;
u32 real_page_size, npages;
- int i, rc = 0, pgt_residency;
+ u64 real_virt_addr;
bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
- if (is_dram_addr)
- mmu_prop = &prop->dmmu;
- else if ((page_size % prop->pmmu_huge.page_size) == 0)
- mmu_prop = &prop->pmmu_huge;
- else
- mmu_prop = &prop->pmmu;
+ mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
- /*
- * The H/W handles mapping of specific page sizes. Hence if the page
- * size is bigger, we break it to sub-pages and unmap them separately.
- */
- if ((page_size % mmu_prop->page_size) == 0) {
- real_page_size = mmu_prop->page_size;
- } else {
- /*
- * MMU page size may differ from DRAM page size.
- * In such case work with the DRAM page size and let the MMU
- * scrambling routine to handle this mismatch when
- * calculating the address to remove from the MMU page table
- */
- if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
- real_page_size = prop->dram_page_size;
- } else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't unmap\n",
- page_size, mmu_prop->page_size >> 10);
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- return -EFAULT;
- }
- }
+ rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+ is_dram_addr);
+ if (rc)
+ return rc;
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = hdev->mmu_func[pgt_residency].unmap(ctx,
- real_virt_addr, is_dram_addr);
+ rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
break;
@@ -203,7 +239,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
}
if (flush_pte)
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return rc;
}
@@ -230,15 +266,15 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
* For optimization reasons PCI flush may be requested once after mapping of
* large area.
*/
-int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size, bool flush_pte)
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool flush_pte)
{
+ int i, rc, pgt_residency, mapped_cnt = 0;
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
+ struct hl_mmu_funcs *mmu_funcs;
u32 real_page_size, npages;
- int i, rc, pgt_residency, mapped_cnt = 0;
bool is_dram_addr;
@@ -246,40 +282,15 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
- if (is_dram_addr)
- mmu_prop = &prop->dmmu;
- else if ((page_size % prop->pmmu_huge.page_size) == 0)
- mmu_prop = &prop->pmmu_huge;
- else
- mmu_prop = &prop->pmmu;
+ mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- /*
- * The H/W handles mapping of specific page sizes. Hence if the page
- * size is bigger, we break it to sub-pages and map them separately.
- */
- if ((page_size % mmu_prop->page_size) == 0) {
- real_page_size = mmu_prop->page_size;
- } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
- (prop->dram_page_size < mmu_prop->page_size)) {
- /*
- * MMU page size may differ from DRAM page size.
- * In such case work with the DRAM page size and let the MMU
- * scrambling routine handle this mismatch when calculating
- * the address to place in the MMU page table. (in that case
- * also make sure that the dram_page_size smaller than the
- * mmu page size)
- */
- real_page_size = prop->dram_page_size;
- } else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't map\n",
- page_size, mmu_prop->page_size >> 10);
-
- return -EFAULT;
- }
+ rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+ is_dram_addr);
+ if (rc)
+ return rc;
/*
* Verify that the phys and virt addresses are aligned with the
@@ -302,9 +313,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = hdev->mmu_func[pgt_residency].map(ctx,
- real_virt_addr, real_phys_addr,
- real_page_size, is_dram_addr);
+ rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
+ is_dram_addr);
if (rc)
goto err;
@@ -314,22 +324,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
if (flush_pte)
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return 0;
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (hdev->mmu_func[pgt_residency].unmap(ctx,
- real_virt_addr, is_dram_addr))
+ if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
real_virt_addr += real_page_size;
}
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return rc;
}
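The page-size selection that hl_mmu_map_page and hl_mmu_unmap_page previously carried inline is now delegated to the per-ASIC mmu_get_real_page_size callback, after which the request is split into npages = page_size / real_page_size sub-pages. A minimal stand-alone sketch of that selection and splitting, following the rules visible in the removed lines above (hypothetical names, not the driver's actual helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the driver's MMU properties. */
struct mmu_props {
        uint32_t mmu_page_size;   /* page size the H/W MMU handles natively */
        uint32_t dram_page_size;  /* DRAM allocation granularity */
};

/*
 * Pick the page size to program into the MMU: prefer the native MMU page
 * size; fall back to the DRAM page size when it is smaller and the request
 * is DRAM-page aligned (the scrambling routine handles the mismatch).
 */
static int get_real_page_size(const struct mmu_props *p, uint32_t page_size,
                              bool is_dram, uint32_t *real)
{
        if ((page_size % p->mmu_page_size) == 0) {
                *real = p->mmu_page_size;
                return 0;
        }
        if (is_dram && (page_size % p->dram_page_size) == 0 &&
            p->dram_page_size < p->mmu_page_size) {
                *real = p->dram_page_size;
                return 0;
        }
        return -1;      /* not aligned to any supported page size */
}

int main(void)
{
        struct mmu_props dmmu = { .mmu_page_size = 2 * 1024 * 1024,
                                  .dram_page_size = 1024 * 1024 };
        uint32_t real, req = 3 * 1024 * 1024;

        if (get_real_page_size(&dmmu, req, true, &real) == 0)
                printf("map %u bytes as %u sub-pages of %u bytes\n",
                       req, req / real, real);
        return 0;
}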
@@ -480,11 +489,9 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops,
u64 *phys_addr)
{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
- u32 hop0_shift_off;
- void *p;
+ struct hl_mmu_properties *mmu_prop;
/* last hop holds the phys address and flags */
if (hops->unscrambled_paddr)
@@ -493,11 +500,11 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
- p = &prop->pmmu_huge;
+ mmu_prop = &prop->pmmu_huge;
else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
- p = &prop->pmmu;
+ mmu_prop = &prop->pmmu;
else /* HL_VA_RANGE_TYPE_DRAM */
- p = &prop->dmmu;
+ mmu_prop = &prop->dmmu;
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
@@ -508,7 +515,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
/*
* Bit arithmetics cannot be used for non power of two page
* sizes. In addition, since bit arithmetics is not used,
- * we cannot ignore dram base. All that shall be considerd.
+ * we cannot ignore dram base. All that shall be considered.
*/
dram_page_size = prop->dram_page_size;
@@ -526,10 +533,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
* structure in order to determine the right masks
* for the page offset.
*/
- hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
- p = (char *)p + hop0_shift_off;
- p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
- hop_shift = *(u64 *)p;
+ hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
offset_mask = (1ull << hop_shift) - 1;
addr_mask = ~(offset_mask);
*phys_addr = (tmp_phys_addr & addr_mask) |
@@ -557,40 +561,39 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops)
{
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct asic_fixed_properties *prop;
struct hl_mmu_properties *mmu_prop;
- int rc;
+ struct hl_mmu_funcs *mmu_funcs;
+ int pgt_residency, rc;
bool is_dram_addr;
if (!hdev->mmu_enable)
return -EOPNOTSUPP;
+ prop = &hdev->asic_prop;
hops->scrambled_vaddr = virt_addr; /* assume no scrambling */
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->dmmu.start_addr,
- prop->dmmu.end_addr);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
- /* host-residency is the same in PMMU and HPMMU, use one of them */
+ /* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
mutex_lock(&ctx->mmu_lock);
-
- if (mmu_prop->host_resident)
- rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx,
- virt_addr, hops);
- else
- rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx,
- virt_addr, hops);
-
+ rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
mutex_unlock(&ctx->mmu_lock);
+ if (rc)
+ return rc;
+
/* add page offset to physical address */
if (hops->unscrambled_paddr)
- hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
- &hops->unscrambled_paddr);
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);
- return rc;
+ return 0;
}
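For power-of-two page sizes, hl_mmu_pa_page_with_offset above now reads the shift of the last used hop straight from the hop_shifts array: the page-offset mask is (1 << hop_shift) - 1, and the physical address is the PTE's frame bits combined with the virtual address's offset bits. A small self-contained illustration of that bit arithmetic (the shift and addresses are made up, not taken from any specific ASIC):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assume the last used hop maps 2MB pages, i.e. hop_shift = 21. */
        uint64_t hop_shift = 21;
        uint64_t offset_mask = (1ULL << hop_shift) - 1;
        uint64_t addr_mask = ~offset_mask;

        uint64_t pte_val = 0x0000001234600000ULL; /* frame bits from the PTE */
        uint64_t virt    = 0x0000007fabc12345ULL; /* translated virtual addr */

        uint64_t phys = (pte_val & addr_mask) | (virt & offset_mask);

        printf("phys = 0x%llx\n", (unsigned long long)phys);
        return 0;
}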
int hl_mmu_if_set_funcs(struct hl_device *hdev)
@@ -662,6 +665,55 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
return rc;
}
+static void hl_mmu_prefetch_work_function(struct work_struct *work)
+{
+ struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
+ struct hl_ctx *ctx = pfw->ctx;
+
+ if (!hl_device_operational(ctx->hdev, NULL))
+ goto put_ctx;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
+ pfw->va, pfw->size);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+put_ctx:
+ /*
+ * context was taken in the common MMU prefetch function - see the comment there about
+ * context handling.
+ */
+ hl_ctx_put(ctx);
+ kfree(pfw);
+}
+
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
+{
+ struct hl_prefetch_work *handle_pf_work;
+
+ handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
+ if (!handle_pf_work)
+ return -ENOMEM;
+
+ INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
+ handle_pf_work->ctx = ctx;
+ handle_pf_work->va = va;
+ handle_pf_work->size = size;
+ handle_pf_work->flags = flags;
+ handle_pf_work->asid = asid;
+
+ /*
+ * as the actual prefetch is done in a WQ, we must get the context (and put it
+ * at the end of the work function)
+ */
+ hl_ctx_get(ctx);
+ queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
+
+ return 0;
+}
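The new prefetch path takes a reference on the context before queueing the work item and drops it at the end of the work function, so the context cannot vanish while the prefetch is still pending on the workqueue. A minimal user-space model of that ownership pattern (hypothetical refcount, and a direct call standing in for queue_work()):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        int refcount;
};

static void ctx_get(struct ctx *c) { c->refcount++; }

static void ctx_put(struct ctx *c)
{
        if (--c->refcount == 0) {
                printf("context released\n");
                free(c);
        }
}

struct prefetch_work {
        struct ctx *ctx;
        /* va, size, flags, asid would live here in the real structure */
};

/* Stands in for the function the workqueue runs later. */
static void prefetch_work_fn(struct prefetch_work *w)
{
        printf("prefetching on behalf of ctx %p\n", (void *)w->ctx);
        ctx_put(w->ctx);        /* drop the reference taken before queueing */
        free(w);
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));
        struct prefetch_work *w = malloc(sizeof(*w));

        if (!c || !w)
                return 1;

        c->refcount = 1;        /* owner's reference */
        w->ctx = c;

        ctx_get(c);             /* reference for the deferred work */
        prefetch_work_fn(w);    /* queue_work() would go here instead */

        ctx_put(c);             /* owner drops its reference */
        return 0;
}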
+
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
@@ -670,6 +722,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
/**
* hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
* @ctx: pointer to the context structure to initialize.
+ * @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
* @virt_addr: virtual address for the translation.
@@ -686,33 +739,8 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m
return U64_MAX;
}
- /* currently max number of HOPs is 6 */
- switch (hop_idx) {
- case 0:
- mask = mmu_prop->hop0_mask;
- shift = mmu_prop->hop0_shift;
- break;
- case 1:
- mask = mmu_prop->hop1_mask;
- shift = mmu_prop->hop1_shift;
- break;
- case 2:
- mask = mmu_prop->hop2_mask;
- shift = mmu_prop->hop2_shift;
- break;
- case 3:
- mask = mmu_prop->hop3_mask;
- shift = mmu_prop->hop3_shift;
- break;
- case 4:
- mask = mmu_prop->hop4_mask;
- shift = mmu_prop->hop4_shift;
- break;
- default:
- mask = mmu_prop->hop5_mask;
- shift = mmu_prop->hop5_shift;
- break;
- }
+ shift = mmu_prop->hop_shifts[hop_idx];
+ mask = mmu_prop->hop_masks[hop_idx];
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
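hl_mmu_get_hop_pte_phys_addr now indexes the per-hop shift and mask arrays instead of the old six-way switch: the PTE address is the hop table base plus mmu_pte_size times the index extracted from the virtual address. A stand-alone sketch of that indexing with made-up shifts and an assumed 8-byte PTE (every hop reuses the same base here purely for illustration; in the driver each hop has its own table):

#include <stdint.h>
#include <stdio.h>

#define MAX_HOPS 5
#define PTE_SIZE 8      /* assumed bytes per PTE, for illustration only */

int main(void)
{
        /* Hypothetical 9-bit-per-level layout on top of 12-bit pages. */
        uint64_t hop_shifts[MAX_HOPS] = { 48, 39, 30, 21, 12 };
        uint64_t hop_masks[MAX_HOPS];
        uint64_t hop_addr = 0x100000;   /* base of some hop table */
        uint64_t virt_addr = 0x0000123456789000ULL;

        for (int i = 0; i < MAX_HOPS; i++)
                hop_masks[i] = 0x1FFULL << hop_shifts[i];   /* 9 bits/level */

        for (int hop_idx = 0; hop_idx < MAX_HOPS; hop_idx++) {
                uint64_t idx = (virt_addr & hop_masks[hop_idx]) >>
                               hop_shifts[hop_idx];
                uint64_t pte_addr = hop_addr + PTE_SIZE * idx;

                printf("hop%d: index %llu -> pte at 0x%llx\n", hop_idx,
                       (unsigned long long)idx, (unsigned long long)pte_addr);
        }
        return 0;
}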
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index d03786d0c407..e2d91a69acc2 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
+#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
+
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
@@ -170,51 +172,15 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
return num_of_ptes_left;
}
-static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr, u64 mask, u64 shift)
-{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & mask) >> shift);
-}
-
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
- mmu_prop->hop0_shift);
-}
-
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
- mmu_prop->hop1_shift);
-}
-
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
+static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
+ u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
- mmu_prop->hop2_shift);
-}
+ u64 mask, shift;
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
- mmu_prop->hop3_shift);
-}
-
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
- mmu_prop->hop4_shift);
+ mask = mmu_prop->hop_masks[hop_idx];
+ shift = mmu_prop->hop_shifts[hop_idx];
+ return hop_addr_arr[hop_idx] +
+ ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
@@ -516,74 +482,50 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
}
}
-static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
+static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
u64 virt_addr, bool is_dram_addr)
{
+ u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte;
bool is_huge, clear_hop3 = true;
+ int hop_idx;
/* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop1_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop2_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
-
- hop3_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop3_addr == ULLONG_MAX)
- goto not_mapped;
+ for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
+ if (hop_idx == MMU_HOP0) {
+ hop_addr[hop_idx] = get_hop0_addr(ctx);
+ } else {
+ hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
+ goto not_mapped;
+ }
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
+ }
is_huge = curr_pte & mmu_prop->last_mask;
if (is_dram_addr && !is_huge) {
- dev_err(hdev->dev,
- "DRAM unmapping should use huge pages only\n");
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
return -EFAULT;
}
if (!is_huge) {
- hop4_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop4_addr == ULLONG_MAX)
+ hop_idx = MMU_HOP4;
+ hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
-
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
clear_hop3 = false;
}
@@ -605,39 +547,33 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
goto not_mapped;
}
- write_final_pte(ctx, hop3_pte_addr, default_pte);
- put_pte(ctx, hop3_addr);
+ hop_idx = MMU_HOP3;
+ write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+ put_pte(ctx, hop_addr[hop_idx]);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
- if (hop4_addr)
- clear_pte(ctx, hop4_pte_addr);
+ if (hop_addr[MMU_HOP4])
+ clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
else
- clear_pte(ctx, hop3_pte_addr);
+ clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
- if (hop4_addr && !put_pte(ctx, hop4_addr))
+ if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
clear_hop3 = true;
if (!clear_hop3)
goto mapped;
- clear_pte(ctx, hop3_pte_addr);
+ for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
+ clear_pte(ctx, hop_pte_addr[hop_idx]);
- if (put_pte(ctx, hop3_addr))
- goto mapped;
+ if (hop_idx == MMU_HOP0)
+ break;
- clear_pte(ctx, hop2_pte_addr);
-
- if (put_pte(ctx, hop2_addr))
- goto mapped;
-
- clear_pte(ctx, hop1_pte_addr);
-
- if (put_pte(ctx, hop1_addr))
- goto mapped;
-
- clear_pte(ctx, hop0_pte_addr);
+ if (put_pte(ctx, hop_addr[hop_idx]))
+ goto mapped;
+ }
}
mapped:
@@ -650,21 +586,15 @@ not_mapped:
return -EINVAL;
}
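The rewritten teardown at the end of hl_mmu_v1_unmap replaces the hop3/hop2/hop1/hop0 ladder with a single descending loop: clear the PTE at each level starting from HOP3, and keep walking toward HOP0 only while put_pte() reports that the freed entry was the last one in that hop table. A compact stand-alone model of that walk (a plain counter array stands in for the driver's pgt_info refcounting):

#include <stdio.h>

#define MMU_HOP0 0
#define MMU_HOP3 3

int main(void)
{
        /* Live PTEs per hop table; hypothetical starting values. */
        int num_of_ptes[4] = { 3, 2, 1, 1 };

        for (int hop_idx = MMU_HOP3; hop_idx >= MMU_HOP0; hop_idx--) {
                printf("clear PTE in hop%d\n", hop_idx);

                if (hop_idx == MMU_HOP0)
                        break;          /* hop0 is never freed */

                /* put_pte(): a table that is still populated stops the walk */
                if (--num_of_ptes[hop_idx] > 0) {
                        printf("hop%d still has PTEs, stop\n", hop_idx);
                        break;
                }
                printf("hop%d is empty, free it and continue toward hop0\n",
                       hop_idx);
        }
        return 0;
}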
-static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool is_dram_addr)
{
+ u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte = 0;
- bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge;
- int rc = -ENOMEM;
+ bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
+ int num_hops, hop_idx, prev_hop, rc = -ENOMEM;
/*
* This mapping function can map a page or a huge page. For huge page
@@ -684,39 +614,21 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
is_huge = false;
}
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
- if (hop1_addr == ULLONG_MAX)
- goto err;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
- if (hop2_addr == ULLONG_MAX)
- goto err;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+ num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;
- hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
- if (hop3_addr == ULLONG_MAX)
- goto err;
-
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
-
- if (!is_huge) {
- hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
- if (hop4_addr == ULLONG_MAX)
- goto err;
+ for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
+ if (hop_idx == MMU_HOP0) {
+ hop_addr[hop_idx] = get_hop0_addr(ctx);
+ } else {
+ hop_addr[hop_idx] =
+ get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
+ goto err;
+ }
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
@@ -732,30 +644,22 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- if (hop1_new || hop2_new || hop3_new || hop4_new) {
- dev_err(hdev->dev,
- "DRAM mapping should not allocate more hops\n");
- rc = -EFAULT;
- goto err;
+ for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+ if (hop_new[hop_idx]) {
+ dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
+ rc = -EFAULT;
+ goto err;
+ }
}
} else if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev,
"mapping already exists for virt_addr 0x%llx\n",
virt_addr);
- dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
- dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
- dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
- dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
-
- if (!is_huge)
- dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop4_pte_addr,
- hop4_pte_addr);
+ for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
+ *(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
+ hop_pte_addr[hop_idx]);
rc = -EINVAL;
goto err;
@@ -764,53 +668,28 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
- if (is_huge)
- write_final_pte(ctx, hop3_pte_addr, curr_pte);
- else
- write_final_pte(ctx, hop4_pte_addr, curr_pte);
+ write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
- if (hop1_new) {
- curr_pte =
- (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_pte_addr, curr_pte);
- }
- if (hop2_new) {
- curr_pte =
- (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_pte_addr, curr_pte);
- get_pte(ctx, hop1_addr);
- }
- if (hop3_new) {
- curr_pte =
- (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, curr_pte);
- get_pte(ctx, hop2_addr);
- }
+ for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+ prev_hop = hop_idx - 1;
- if (!is_huge) {
- if (hop4_new) {
- curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- write_pte(ctx, hop3_pte_addr, curr_pte);
- get_pte(ctx, hop3_addr);
+ if (hop_new[hop_idx]) {
+ curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+ if (hop_idx != MMU_HOP1)
+ get_pte(ctx, hop_addr[prev_hop]);
}
-
- get_pte(ctx, hop4_addr);
- } else {
- get_pte(ctx, hop3_addr);
}
+ get_pte(ctx, hop_addr[num_hops - 1]);
+
return 0;
err:
- if (hop4_new)
- free_hop(ctx, hop4_addr);
- if (hop3_new)
- free_hop(ctx, hop3_addr);
- if (hop2_new)
- free_hop(ctx, hop2_addr);
- if (hop1_new)
- free_hop(ctx, hop1_addr);
+ for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
+ if (hop_new[hop_idx])
+ free_hop(ctx, hop_addr[hop_idx]);
+ }
return rc;
}
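The map path's new linking loop collapses the old hop1_new/hop2_new/hop3_new/hop4_new blocks: every freshly allocated hop gets its address written into the parent's PTE, and the parent's reference count is bumped unless the parent is HOP0 (which is statically allocated and never freed); finally the hop holding the terminal PTE takes one reference for the mapping itself. A small stand-alone model of just that bookkeeping (hypothetical array-based refcounts):

#include <stdbool.h>
#include <stdio.h>

#define MMU_HOP1 1
#define MAX_HOPS 5

int main(void)
{
        /* Which hops were freshly allocated during this map; hypothetical. */
        bool hop_new[MAX_HOPS] = { false, false, true, true, true };
        int refcount[MAX_HOPS] = { 0 };
        int num_hops = MAX_HOPS;        /* a non-huge mapping uses all hops */

        for (int hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
                if (!hop_new[hop_idx])
                        continue;

                printf("link hop%d into hop%d's PTE\n", hop_idx, hop_idx - 1);

                /* hop0 is not reference counted */
                if (hop_idx != MMU_HOP1)
                        refcount[hop_idx - 1]++;
        }

        /* the last hop holds the final PTE of the new mapping */
        refcount[num_hops - 1]++;

        for (int i = 0; i < MAX_HOPS; i++)
                printf("hop%d refcount delta: +%d\n", i, refcount[i]);
        return 0;
}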
@@ -928,8 +807,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
mmu->fini = hl_mmu_v1_fini;
mmu->ctx_init = hl_mmu_v1_ctx_init;
mmu->ctx_fini = hl_mmu_v1_ctx_fini;
- mmu->map = _hl_mmu_v1_map;
- mmu->unmap = _hl_mmu_v1_unmap;
+ mmu->map = hl_mmu_v1_map;
+ mmu->unmap = hl_mmu_v1_unmap;
mmu->flush = flush;
mmu->swap_out = hl_mmu_v1_swap_out;
mmu->swap_in = hl_mmu_v1_swap_in;
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index bb9ce22bafc4..610acd4a8057 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -392,6 +392,7 @@ enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
*/
int hl_pci_init(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
int rc;
@@ -419,17 +420,14 @@ int hl_pci_init(struct hl_device *hdev)
}
/* Driver must sleep in order for FW to finish the iATU configuration */
- if (hdev->asic_prop.iatu_done_by_fw) {
+ if (hdev->asic_prop.iatu_done_by_fw)
usleep_range(2000, 3000);
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
- }
- rc = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(hdev->dma_mask));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
if (rc) {
dev_err(hdev->dev,
"Failed to set dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
+ prop->dma_mask, rc);
goto unmap_pci_bars;
}
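With the firmware-driven mask removed, the DMA mask becomes a fixed ASIC property (prop->dma_mask = 48 is added for both Gaudi and Goya later in this diff) and is applied once through dma_set_mask_and_coherent(). A hedged sketch of that call shape in a generic PCI probe helper, not the habanalabs code itself:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Assumed fixed property, mirroring the prop->dma_mask = 48 assignments. */
#define MY_DEV_DMA_MASK_BITS 48

static int my_dev_set_dma_mask(struct pci_dev *pdev)
{
        int rc;

        /* One call covers both streaming and coherent mappings. */
        rc = dma_set_mask_and_coherent(&pdev->dev,
                                       DMA_BIT_MASK(MY_DEV_DMA_MASK_BITS));
        if (rc)
                dev_err(&pdev->dev, "failed to set %d-bit DMA mask: %d\n",
                        MY_DEV_DMA_MASK_BITS, rc);

        return rc;
}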
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 21c2b678ff72..fba322241096 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -95,7 +95,7 @@
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
-#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
+#define GAUDI_ARB_WDT_TIMEOUT 0xEE6b27FF /* 8 seconds */
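On the new constant: 0xEE6b27FF is 3,999,999,999, so the "8 seconds" annotation holds if the arbiter watchdog counts at roughly 500 MHz; that clock rate is an assumption used for illustration, not something stated in this diff. A one-line check:

#include <stdio.h>

int main(void)
{
        double cycles = 0xEE6b27FF;        /* 3,999,999,999 */
        double assumed_clk_hz = 500e6;     /* assumed ~500 MHz arbiter clock */

        printf("%.2f s\n", cycles / assumed_clk_hz);   /* prints 8.00 s */
        return 0;
}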
#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
BIT(GAUDI_ENGINE_ID_MME_0) |\
@@ -557,6 +557,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
}
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+ prop->host_base_address = HOST_PHYS_BASE;
+ prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -595,18 +597,19 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = false;
- prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
- prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
- prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
- prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
- prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
- prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
- prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
- prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
- prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
- prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
+ prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT;
+ prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK;
+ prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK;
+ prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK;
+ prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK;
+ prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK;
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
@@ -673,6 +676,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->set_max_power_on_device_init = true;
+ prop->dma_mask = 48;
+
return 0;
}
@@ -754,8 +759,6 @@ static int gaudi_init_iatu(struct hl_device *hdev)
if (rc)
goto done;
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
@@ -1008,7 +1011,7 @@ free_job:
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -1470,7 +1473,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
job->patched_cb = NULL;
job->job_cb_size = job->user_cb_size;
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
if (hw_queue_prop->type == QUEUE_TYPE_EXT)
@@ -2808,9 +2811,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
QMAN_EXTERNAL_MAKE_TRUSTED);
@@ -2987,9 +2989,8 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
@@ -3124,9 +3125,8 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
@@ -3258,9 +3258,8 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
@@ -3409,9 +3408,8 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
@@ -3792,9 +3790,6 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_
{
u32 wait_timeout_ms;
- dev_info(hdev->dev,
- "Halting compute engines and disabling interrupts\n");
-
if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
else
@@ -4212,7 +4207,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
}
if (fw_reset) {
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
@@ -4304,11 +4299,11 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
}
@@ -4745,12 +4740,11 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
-static int gaudi_hbm_scrubbing(struct hl_device *hdev)
+static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 cur_addr = DRAM_BASE_ADDR_USER;
- u32 val;
- u32 chunk_size;
+ u32 chunk_size, busy;
int rc, dma_id;
while (cur_addr < prop->dram_end_address) {
@@ -4764,8 +4758,10 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset,
+ lower_32_bits(val));
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset,
+ upper_32_bits(val));
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -4788,8 +4784,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
rc = hl_poll_timeout(
hdev,
mmDMA0_CORE_STS0 + dma_offset,
- val,
- ((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+ busy,
+ ((busy & DMA0_CORE_STS0_BUSY_MASK) == 0),
1000,
HBM_SCRUBBING_TIMEOUT_US);
@@ -4843,7 +4839,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
}
/* Scrub HBM using all DMA channels in parallel */
- rc = gaudi_hbm_scrubbing(hdev);
+ rc = gaudi_scrub_device_dram(hdev, 0xdeadbeaf);
if (rc)
dev_err(hdev->dev,
"Failed to clear HBM in mem scrub all\n");
@@ -5038,37 +5034,7 @@ static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
- return -ENOMEM;
-
- /* Shift to the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address += HOST_PHYS_BASE;
-
- return 0;
-}
-
-static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- /* Cancel the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address -= HOST_PHYS_BASE;
-
- dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
-static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
- struct sg_table *sgt)
+static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt;
@@ -5077,8 +5043,7 @@ static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
@@ -5132,8 +5097,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
list_add_tail(&userptr->job_node, parser->job_userptr_list);
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, dir);
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
@@ -5408,7 +5372,7 @@ static int gaudi_patch_dma_packet(struct hl_device *hdev,
sgt = userptr->sgt;
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
@@ -5562,7 +5526,7 @@ static int gaudi_patch_cb(struct hl_device *hdev,
static int gaudi_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
@@ -5578,9 +5542,9 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
else
parser->patched_cb_size = parser->user_cb_size;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
@@ -5589,13 +5553,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
return rc;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -5635,8 +5596,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
@@ -5644,7 +5604,7 @@ out:
static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
int rc;
rc = gaudi_validate_cb(hdev, parser, false);
@@ -5652,22 +5612,19 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -5684,8 +5641,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
@@ -5798,7 +5754,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
- u64 id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5865,9 +5820,8 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
}
release_cb:
- id = cb->id;
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -5930,7 +5884,7 @@ static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -6101,184 +6055,6 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev)
}
-static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
- *val = RREG32(addr - CFG_BASE);
-
- } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
- *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- *val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
- WREG32(addr - CFG_BASE, val);
-
- } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
- writel(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- writel(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
- u32 val_l = RREG32(addr - CFG_BASE);
- u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
- *val = (((u64) val_h) << 32) | val_l;
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
- *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- *val = readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
- WREG32(addr - CFG_BASE, lower_32_bits(val));
- WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
- writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
u32 size_to_dma, dma_addr_t dma_addr)
{
@@ -7628,19 +7404,18 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
/* In case it's the first razwi, save its parameters*/
- rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
+ rc = atomic_cmpxchg(&hdev->last_error.razwi.write_disable, 0, 1);
if (!rc) {
- hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
- hdev->last_error.razwi_timestamp = ktime_get();
- hdev->last_error.razwi_addr = razwi_addr;
- hdev->last_error.razwi_engine_id_1 = engine_id_1;
- hdev->last_error.razwi_engine_id_2 = engine_id_2;
+ hdev->last_error.razwi.timestamp = ktime_get();
+ hdev->last_error.razwi.addr = razwi_addr;
+ hdev->last_error.razwi.engine_id_1 = engine_id_1;
+ hdev->last_error.razwi.engine_id_2 = engine_id_2;
/*
* If first engine id holds non valid value the razwi initiator
* does not have engine id
*/
- hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
- hdev->last_error.razwi_type = razwi_type;
+ hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX);
+ hdev->last_error.razwi.type = razwi_type;
}
}
@@ -8103,7 +7878,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_MMU_PAGE_FAULT:
case GAUDI_EVENT_MMU_WR_PERM:
case GAUDI_EVENT_RAZWI_OR_ADC:
- case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
fallthrough;
@@ -8123,6 +7897,19 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
hl_fw_unmask_irq(hdev, event_type);
break;
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_qman_err(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+
+ /* On a TPC QM event, also notify about a possible TPC assertion. While
+ * there isn't a specific event for the assertion yet, the FW generates a
+ * QM event. The SW upper layer will inspect an internal mapped area to
+ * determine whether the event is a TPC assertion or a TPC QM error.
+ */
+ hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);
+ break;
+
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
goto reset_device;
@@ -8328,8 +8115,6 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
set_default_power_values(hdev);
- hdev->max_power = prop->max_power_default;
-
return 0;
}
@@ -8501,6 +8286,16 @@ static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
+static int gaudi_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_get_monitor_dump(hdev, data);
+}
+
/*
* this function should be used only during initialization and/or after reset,
* when there are no active users.
@@ -9066,11 +8861,6 @@ static void gaudi_reset_sob(struct hl_device *hdev, void *data)
kref_init(&hw_sob->kref);
}
-static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
-{
- hdev->dma_mask = 48;
-}
-
static u64 gaudi_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -9132,7 +8922,7 @@ static int gaudi_add_sync_to_engine_map_entry(
*/
if (reg_value == 0 || reg_value == 0xffffffff)
return 0;
- reg_value -= (u32)CFG_BASE;
+ reg_value -= lower_32_bits(CFG_BASE);
/* create a new hash entry */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -9377,6 +9167,12 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
return gaudi_stream_master;
}
+static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+ /* set 0 since multiple pages are not supported */
+ info->page_order_bitmask = 0;
+}
+
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
@@ -9418,24 +9214,21 @@ static const struct hl_asic_funcs gaudi_funcs = {
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
.asic_dma_free_coherent = gaudi_dma_free_coherent,
.scrub_device_mem = gaudi_scrub_device_mem,
+ .scrub_device_dram = gaudi_scrub_device_dram,
.get_int_queue_base = gaudi_get_int_queue_base,
.test_queues = gaudi_test_queues,
.asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
.asic_dma_pool_free = gaudi_dma_pool_free,
.cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
- .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
+ .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = gaudi_cs_parser,
- .asic_dma_map_sg = gaudi_dma_map_sg,
+ .asic_dma_map_sgtable = hl_dma_map_sgtable,
.get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
.update_eq_ci = gaudi_update_eq_ci,
.context_switch = gaudi_context_switch,
.restore_phase_topology = gaudi_restore_phase_topology,
- .debugfs_read32 = gaudi_debugfs_read32,
- .debugfs_write32 = gaudi_debugfs_write32,
- .debugfs_read64 = gaudi_debugfs_read64,
- .debugfs_write64 = gaudi_debugfs_write64,
.debugfs_read_dma = gaudi_debugfs_read_dma,
.add_device_attr = gaudi_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
@@ -9444,6 +9237,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.write_pte = gaudi_write_pte,
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
+ .mmu_prefetch_cache_range = NULL,
.send_heartbeat = gaudi_send_heartbeat,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
@@ -9452,6 +9246,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.hw_queues_unlock = gaudi_hw_queues_unlock,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
+ .get_monitor_dump = gaudi_get_monitor_dump,
.send_cpu_message = gaudi_send_cpu_message,
.pci_bars_map = gaudi_pci_bars_map,
.init_iatu = gaudi_init_iatu,
@@ -9469,7 +9264,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
.gen_wait_cb = gaudi_gen_wait_cb,
.reset_sob = gaudi_reset_sob,
.reset_sob_group = gaudi_reset_sob_group,
- .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
.get_device_time = gaudi_get_device_time,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
.collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
@@ -9486,7 +9280,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
.get_sob_addr = gaudi_get_sob_addr,
.set_pci_memory_regions = gaudi_set_pci_memory_regions,
.get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL
+ .is_valid_dram_page_size = NULL,
+ .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+ .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders,
+ .access_dev_mem = hl_access_dev_mem,
+ .set_dram_bar_base = gaudi_set_hbm_bar_base,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 54de7c599072..4fbcf3f0afe5 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -148,14 +148,14 @@
#define MME_QMAN_LENGTH 1024
#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define HBM_DMA_QMAN_LENGTH 1024
+#define HBM_DMA_QMAN_LENGTH 4096
#define HBM_DMA_QMAN_SIZE_IN_BYTES \
(HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
#define TPC_QMAN_LENGTH 1024
#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define NIC_QMAN_LENGTH 1024
+#define NIC_QMAN_LENGTH 4096
#define NIC_QMAN_SIZE_IN_BYTES (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index ec9358bcbf0b..4cde505a7416 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -390,6 +390,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
}
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+ prop->host_base_address = HOST_PHYS_BASE;
+ prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -413,18 +415,19 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = true;
- prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT;
- prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT;
- prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT;
- prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT;
- prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT;
- prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK;
- prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK;
- prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK;
- prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK;
- prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK;
+ prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
+ prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
+ prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
+ prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
+ prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
+ prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
@@ -487,6 +490,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->set_max_power_on_device_init = true;
+ prop->dma_mask = 48;
+
return 0;
}
@@ -574,8 +579,6 @@ static int goya_init_iatu(struct hl_device *hdev)
if (rc)
goto done;
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
@@ -2479,9 +2482,6 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_r
{
u32 wait_timeout_ms;
- dev_info(hdev->dev,
- "Halting compute engines and disabling interrupts\n");
-
if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
else
@@ -2825,12 +2825,12 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
goya_set_pll_refclk(hdev);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued SOFT reset command, going to wait %dms\n",
reset_timeout_ms);
}
@@ -3311,35 +3311,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
- return -ENOMEM;
-
- /* Shift to the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address += HOST_PHYS_BASE;
-
- return 0;
-}
-
-static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- /* Cancel the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address -= HOST_PHYS_BASE;
-
- dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
@@ -3349,8 +3320,7 @@ u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
@@ -3404,8 +3374,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
list_add_tail(&userptr->job_node, parser->job_userptr_list);
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, dir);
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
@@ -3869,7 +3838,7 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
sgt = userptr->sgt;
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
@@ -4032,7 +4001,7 @@ static int goya_patch_cb(struct hl_device *hdev,
static int goya_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
@@ -4045,9 +4014,9 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
@@ -4056,13 +4025,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
return rc;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -4102,8 +4068,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
@@ -4111,7 +4076,7 @@ out:
static int goya_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
int rc;
rc = goya_validate_cb(hdev, parser, false);
@@ -4119,22 +4084,19 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -4151,8 +4113,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
@@ -4259,224 +4220,7 @@ static void goya_clear_sm_regs(struct hl_device *hdev)
i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
}
-/*
- * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
- * address.
- *
- * @hdev: pointer to hl_device structure
- * @addr: device or host mapped address
- * @val: returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows reading from the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- *val = RREG32(addr - CFG_BASE);
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
- *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/*
- * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
- * address.
- *
- * @hdev: pointer to hl_device structure
- * @addr: device or host mapped address
- * @val: returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows writing to the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- WREG32(addr - CFG_BASE, val);
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
- writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- writel(val, hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- u32 val_l = RREG32(addr - CFG_BASE);
- u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
- *val = (((u64) val_h) << 32) | val_l;
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
- *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- WREG32(addr - CFG_BASE, lower_32_bits(val));
- WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
- writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
- void *blob_addr)
+static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
{
dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n");
return -EPERM;
@@ -5101,7 +4845,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -5561,11 +5305,6 @@ static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
}
-static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
-{
- hdev->dma_mask = 48;
-}
-
u64 goya_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -5678,6 +5417,22 @@ static u32 *goya_get_stream_master_qid_arr(void)
return NULL;
}
+static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+	/* set to 0 since multiple page sizes are not supported */
+ info->page_order_bitmask = 0;
+}
+
+static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
+{
+ return -EOPNOTSUPP;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5696,24 +5451,21 @@ static const struct hl_asic_funcs goya_funcs = {
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.scrub_device_mem = goya_scrub_device_mem,
+ .scrub_device_dram = goya_scrub_device_dram,
.get_int_queue_base = goya_get_int_queue_base,
.test_queues = goya_test_queues,
.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
.asic_dma_pool_free = goya_dma_pool_free,
.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
- .hl_dma_unmap_sg = goya_dma_unmap_sg,
+ .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = goya_cs_parser,
- .asic_dma_map_sg = goya_dma_map_sg,
+ .asic_dma_map_sgtable = hl_dma_map_sgtable,
.get_dma_desc_list_size = goya_get_dma_desc_list_size,
.add_end_of_cb_packets = goya_add_end_of_cb_packets,
.update_eq_ci = goya_update_eq_ci,
.context_switch = goya_context_switch,
.restore_phase_topology = goya_restore_phase_topology,
- .debugfs_read32 = goya_debugfs_read32,
- .debugfs_write32 = goya_debugfs_write32,
- .debugfs_read64 = goya_debugfs_read64,
- .debugfs_write64 = goya_debugfs_write64,
.debugfs_read_dma = goya_debugfs_read_dma,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
@@ -5722,6 +5474,7 @@ static const struct hl_asic_funcs goya_funcs = {
.write_pte = goya_write_pte,
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
+ .mmu_prefetch_cache_range = NULL,
.send_heartbeat = goya_send_heartbeat,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
@@ -5730,6 +5483,7 @@ static const struct hl_asic_funcs goya_funcs = {
.hw_queues_unlock = goya_hw_queues_unlock,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
+ .get_monitor_dump = goya_get_monitor_dump,
.send_cpu_message = goya_send_cpu_message,
.pci_bars_map = goya_pci_bars_map,
.init_iatu = goya_init_iatu,
@@ -5747,7 +5501,6 @@ static const struct hl_asic_funcs goya_funcs = {
.gen_wait_cb = goya_gen_wait_cb,
.reset_sob = goya_reset_sob,
.reset_sob_group = goya_reset_sob_group,
- .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
.get_device_time = goya_get_device_time,
.collective_wait_init_cs = goya_collective_wait_init_cs,
.collective_wait_create_jobs = goya_collective_wait_create_jobs,
@@ -5764,7 +5517,11 @@ static const struct hl_asic_funcs goya_funcs = {
.get_sob_addr = &goya_get_sob_addr,
.set_pci_memory_regions = goya_set_pci_memory_regions,
.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL
+ .is_valid_dram_page_size = NULL,
+ .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+ .get_valid_dram_page_orders = goya_get_valid_dram_page_orders,
+ .access_dev_mem = hl_access_dev_mem,
+ .set_dram_bar_base = goya_set_ddr_bar_base,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 65668dac6a5f..38e44b6cf581 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -389,6 +389,14 @@ enum pq_init_status {
*
* CPUCP_PACKET_ENGINE_CORE_ASID_SET -
* Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get a dump of the monitor registers from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Relevant only to Gaudi.
*/
enum cpucp_packet_id {
@@ -439,6 +447,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_POWER_SET, /* internal */
CPUCP_PACKET_RESERVED, /* not used */
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_RESERVED3, /* not used */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -555,6 +568,12 @@ struct cpucp_array_data_packet {
__le32 data[];
};
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX
+};
+
enum cpucp_packet_rc {
cpucp_packet_success,
cpucp_packet_invalid,
@@ -576,7 +595,10 @@ enum cpucp_temp_type {
cpucp_temp_offset = 19,
cpucp_temp_lowest = 21,
cpucp_temp_highest = 22,
- cpucp_temp_reset_history = 23
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
};
enum cpucp_in_attributes {
@@ -686,6 +708,7 @@ enum pll_index {
enum rl_index {
TPC_RL = 0,
MME_RL,
+ EDMA_RL,
};
enum pvt_index {
@@ -820,6 +843,7 @@ enum cpucp_serdes_type {
TYPE_2_SERDES_TYPE,
HLS1_SERDES_TYPE,
HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
UNKNOWN_SERDES_TYPE,
MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
};
@@ -833,9 +857,28 @@ struct cpucp_nic_info {
__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
__le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
__u8 reserved[6];
};
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
+
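For reference, the ser_val encoding above is just a scaled integer pair; a minimal standalone sketch (host-side illustration only, not part of this patch, and ignoring the le16_to_cpu() conversion real driver code would need) of recovering the value:

/* ser_demo.c - decode a ser_val-style pair as integer * 10^-exp */
#include <stdint.h>
#include <stdio.h>
#include <math.h>

struct ser_val_host {
	uint16_t integer;	/* integer part of the SER value */
	uint16_t exp;		/* exponent part of the SER value */
};

static double ser_to_double(const struct ser_val_host *v)
{
	/* value = integer * 10^-exp, e.g. {25, 12} -> 2.5e-11 */
	return (double)v->integer * pow(10.0, -(double)v->exp);
}

int main(void)
{
	struct ser_val_host sample = { .integer = 25, .exp = 12 };

	printf("SER = %g\n", ser_to_double(&sample));	/* prints 2.5e-11 */
	return 0;
}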
/*
* struct cpucp_nic_status - describes the status of a NIC port.
* @port: NIC port index.
@@ -889,4 +932,29 @@ struct cpucp_hbm_row_replaced_rows_info {
struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
};
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * The structure follows the sync manager block layout. Relevant only to Gaudi.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[512];
+ __le32 mon_pay_addrh[512];
+ __le32 mon_pay_data[512];
+ __le32 mon_arm[512];
+ __le32 mon_status[512];
+};
+
+/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
#endif /* CPUCP_IF_H */
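To make the buffer-sizing side of CPUCP_PACKET_MONITOR_DUMP_GET concrete, here is a self-contained sketch (not driver code; uint32_t stands in for __le32 and the structures only mirror the layout above) that prints the size the host driver would allocate and advertise as the maximum the firmware may write:

/* monitor_dump_size.c - size of the monitor dump buffer described above */
#include <stdint.h>
#include <stdio.h>

struct dcore_monitor_regs_data_host {
	uint32_t mon_pay_addrl[512];
	uint32_t mon_pay_addrh[512];
	uint32_t mon_pay_data[512];
	uint32_t mon_arm[512];
	uint32_t mon_status[512];
};

struct cpucp_monitor_dump_host {
	struct dcore_monitor_regs_data_host sync_mngr_w_s;
	struct dcore_monitor_regs_data_host sync_mngr_e_s;
	struct dcore_monitor_regs_data_host sync_mngr_w_n;
	struct dcore_monitor_regs_data_host sync_mngr_e_n;
};

int main(void)
{
	/* 5 arrays * 512 entries * 4 bytes = 10240 bytes per sync manager,
	 * 4 sync managers -> 40960 bytes in total */
	printf("per-dcore dump: %zu bytes\n",
	       sizeof(struct dcore_monitor_regs_data_host));
	printf("full dump     : %zu bytes\n",
	       sizeof(struct cpucp_monitor_dump_host));
	return 0;
}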
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 758f246627f8..cae8ac8bc5b1 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -34,4 +34,14 @@
#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
+enum mmu_hop_num {
+ MMU_HOP0,
+ MMU_HOP1,
+ MMU_HOP2,
+ MMU_HOP3,
+ MMU_HOP4,
+ MMU_HOP5,
+ MMU_HOP_MAX,
+};
+
#endif /* INCLUDE_MMU_GENERAL_H_ */
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index f21854ac5cc2..009239ad1d8a 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -68,40 +68,40 @@ void __init lkdtm_bugs_init(int *recur_param)
recur_count = *recur_param;
}
-void lkdtm_PANIC(void)
+static void lkdtm_PANIC(void)
{
panic("dumptest");
}
-void lkdtm_BUG(void)
+static void lkdtm_BUG(void)
{
BUG();
}
static int warn_counter;
-void lkdtm_WARNING(void)
+static void lkdtm_WARNING(void)
{
WARN_ON(++warn_counter);
}
-void lkdtm_WARNING_MESSAGE(void)
+static void lkdtm_WARNING_MESSAGE(void)
{
WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
-void lkdtm_EXCEPTION(void)
+static void lkdtm_EXCEPTION(void)
{
*((volatile int *) 0) = 0;
}
-void lkdtm_LOOP(void)
+static void lkdtm_LOOP(void)
{
for (;;)
;
}
-void lkdtm_EXHAUST_STACK(void)
+static void lkdtm_EXHAUST_STACK(void)
{
pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
@@ -115,7 +115,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack)
}
/* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
+static noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
@@ -125,7 +125,7 @@ noinline void lkdtm_CORRUPT_STACK(void)
}
/* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
union {
unsigned short shorts[4];
@@ -139,7 +139,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
static pid_t stack_pid;
static unsigned long stack_addr;
-void lkdtm_REPORT_STACK(void)
+static void lkdtm_REPORT_STACK(void)
{
volatile uintptr_t magic;
pid_t pid = task_pid_nr(current);
@@ -222,7 +222,7 @@ static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
}
}
-void lkdtm_REPORT_STACK_CANARY(void)
+static void lkdtm_REPORT_STACK_CANARY(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *)) = { };
@@ -230,7 +230,7 @@ void lkdtm_REPORT_STACK_CANARY(void)
__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
@@ -245,21 +245,21 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
-void lkdtm_SOFTLOCKUP(void)
+static void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
-void lkdtm_HARDLOCKUP(void)
+static void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
-void lkdtm_SPINLOCKUP(void)
+static void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
@@ -267,7 +267,7 @@ void lkdtm_SPINLOCKUP(void)
__release(&lock_me_up);
}
-void lkdtm_HUNG_TASK(void)
+static void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
@@ -276,7 +276,7 @@ void lkdtm_HUNG_TASK(void)
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
-void lkdtm_OVERFLOW_SIGNED(void)
+static void lkdtm_OVERFLOW_SIGNED(void)
{
int value;
@@ -291,7 +291,7 @@ void lkdtm_OVERFLOW_SIGNED(void)
}
-void lkdtm_OVERFLOW_UNSIGNED(void)
+static void lkdtm_OVERFLOW_UNSIGNED(void)
{
unsigned int value;
@@ -319,7 +319,7 @@ struct array_bounds {
int three;
};
-void lkdtm_ARRAY_BOUNDS(void)
+static void lkdtm_ARRAY_BOUNDS(void)
{
struct array_bounds_flex_array *not_checked;
struct array_bounds *checked;
@@ -327,6 +327,11 @@ void lkdtm_ARRAY_BOUNDS(void)
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -346,10 +351,13 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
- pr_expected_config(CONFIG_UBSAN_BOUNDS);
+ if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
-void lkdtm_CORRUPT_LIST_ADD(void)
+static void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
* Initially, an empty list via LIST_HEAD:
@@ -389,7 +397,7 @@ void lkdtm_CORRUPT_LIST_ADD(void)
}
}
-void lkdtm_CORRUPT_LIST_DEL(void)
+static void lkdtm_CORRUPT_LIST_DEL(void)
{
LIST_HEAD(test_head);
struct lkdtm_list item;
@@ -417,7 +425,7 @@ void lkdtm_CORRUPT_LIST_DEL(void)
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack - 1;
@@ -431,7 +439,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack + THREAD_SIZE;
@@ -444,7 +452,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
-void lkdtm_UNSET_SMEP(void)
+static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
@@ -510,7 +518,7 @@ void lkdtm_UNSET_SMEP(void)
#endif
}
-void lkdtm_DOUBLE_FAULT(void)
+static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
@@ -558,7 +566,7 @@ static noinline void change_pac_parameters(void)
}
#endif
-noinline void lkdtm_CORRUPT_PAC(void)
+static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
@@ -586,3 +594,37 @@ noinline void lkdtm_CORRUPT_PAC(void)
pr_err("XFAIL: this test is arm64-only\n");
#endif
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(DOUBLE_FAULT),
+ CRASHTYPE(CORRUPT_PAC),
+};
+
+struct crashtype_category bugs_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index c9aeddef1044..666a7f4bc137 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -3,6 +3,7 @@
* This is for all the tests relating directly to Control Flow Integrity.
*/
#include "lkdtm.h"
+#include <asm/page.h>
static int called_count;
@@ -22,7 +23,7 @@ static noinline int lkdtm_increment_int(int *counter)
/*
* This tries to call an indirect function with a mismatched prototype.
*/
-void lkdtm_CFI_FORWARD_PROTO(void)
+static void lkdtm_CFI_FORWARD_PROTO(void)
{
/*
* Matches lkdtm_increment_void()'s prototype, but not
@@ -41,3 +42,145 @@ void lkdtm_CFI_FORWARD_PROTO(void)
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
}
+
+/*
+ * This can stay local to LKDTM, as there should not be a production reason
+ * to disable PAC && SCS.
+ */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+# ifdef CONFIG_ARM64_BTI_KERNEL
+# define __no_pac "branch-protection=bti"
+# else
+# define __no_pac "branch-protection=none"
+# endif
+# define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
+#else
+# define __no_ret_protection __noscs
+#endif
+
+#define no_pac_addr(addr) \
+ ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+
+/* The ultimate ROP gadget. */
+static noinline __no_ret_protection
+void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static noinline
+void set_return_addr(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static volatile int force_check;
+
+static void lkdtm_CFI_BACKWARD(void)
+{
+ /* Use calculated gotos to keep labels addressable. */
+ void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected};
+
+ pr_info("Attempting unchecked stack return address redirection ...\n");
+
+ /* Always false */
+ if (force_check) {
+ /*
+ * Prepare to call with NULLs to avoid parameters being treated as
+ * constants in -02.
+ */
+ set_return_addr_unchecked(NULL, NULL);
+ set_return_addr(NULL, NULL);
+ if (force_check)
+ goto *labels[1];
+ if (force_check)
+ goto *labels[2];
+ if (force_check)
+ goto *labels[3];
+ if (force_check)
+ goto *labels[4];
+ return;
+ }
+
+ /*
+ * Use fallthrough switch case to keep basic block ordering between
+ * set_return_addr*() and the label after it.
+ */
+ switch (force_check) {
+ case 0:
+ set_return_addr_unchecked(&&normal, &&redirected);
+ fallthrough;
+ case 1:
+normal:
+ /* Always true */
+ if (!force_check) {
+ pr_err("FAIL: stack return address manipulation failed!\n");
+ /* If we can't redirect "normally", we can't test mitigations. */
+ return;
+ }
+ break;
+ default:
+redirected:
+ pr_info("ok: redirected stack return address.\n");
+ break;
+ }
+
+ pr_info("Attempting checked stack return address redirection ...\n");
+
+ switch (force_check) {
+ case 0:
+ set_return_addr(&&check_normal, &&check_redirected);
+ fallthrough;
+ case 1:
+check_normal:
+ /* Always true */
+ if (!force_check) {
+ pr_info("ok: control flow unchanged.\n");
+ return;
+ }
+
+check_redirected:
+ pr_err("FAIL: stack return address was redirected!\n");
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+ pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
+ return;
+ }
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+ pr_expected_config(CONFIG_SHADOW_CALL_STACK);
+ return;
+ }
+ pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
+ lkdtm_kernel_info,
+ "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(CFI_BACKWARD),
+};
+
+struct crashtype_category cfi_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index e2228b6fc09b..b4712ff196b4 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -86,109 +86,21 @@ static struct crashpoint crashpoints[] = {
#endif
};
-
-/* Crash types. */
-struct crashtype {
- const char *name;
- void (*func)(void);
-};
-
-#define CRASHTYPE(_name) \
- { \
- .name = __stringify(_name), \
- .func = lkdtm_ ## _name, \
- }
-
-/* Define the possible types of crashes that can be triggered. */
-static const struct crashtype crashtypes[] = {
- CRASHTYPE(PANIC),
- CRASHTYPE(BUG),
- CRASHTYPE(WARNING),
- CRASHTYPE(WARNING_MESSAGE),
- CRASHTYPE(EXCEPTION),
- CRASHTYPE(LOOP),
- CRASHTYPE(EXHAUST_STACK),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
- CRASHTYPE(REPORT_STACK),
- CRASHTYPE(REPORT_STACK_CANARY),
- CRASHTYPE(CORRUPT_LIST_ADD),
- CRASHTYPE(CORRUPT_LIST_DEL),
- CRASHTYPE(STACK_GUARD_PAGE_LEADING),
- CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
- CRASHTYPE(UNSET_SMEP),
- CRASHTYPE(CORRUPT_PAC),
- CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
- CRASHTYPE(SLAB_LINEAR_OVERFLOW),
- CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
- CRASHTYPE(WRITE_AFTER_FREE),
- CRASHTYPE(READ_AFTER_FREE),
- CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
- CRASHTYPE(READ_BUDDY_AFTER_FREE),
- CRASHTYPE(SLAB_INIT_ON_ALLOC),
- CRASHTYPE(BUDDY_INIT_ON_ALLOC),
- CRASHTYPE(SLAB_FREE_DOUBLE),
- CRASHTYPE(SLAB_FREE_CROSS),
- CRASHTYPE(SLAB_FREE_PAGE),
- CRASHTYPE(SOFTLOCKUP),
- CRASHTYPE(HARDLOCKUP),
- CRASHTYPE(SPINLOCKUP),
- CRASHTYPE(HUNG_TASK),
- CRASHTYPE(OVERFLOW_SIGNED),
- CRASHTYPE(OVERFLOW_UNSIGNED),
- CRASHTYPE(ARRAY_BOUNDS),
- CRASHTYPE(EXEC_DATA),
- CRASHTYPE(EXEC_STACK),
- CRASHTYPE(EXEC_KMALLOC),
- CRASHTYPE(EXEC_VMALLOC),
- CRASHTYPE(EXEC_RODATA),
- CRASHTYPE(EXEC_USERSPACE),
- CRASHTYPE(EXEC_NULL),
- CRASHTYPE(ACCESS_USERSPACE),
- CRASHTYPE(ACCESS_NULL),
- CRASHTYPE(WRITE_RO),
- CRASHTYPE(WRITE_RO_AFTER_INIT),
- CRASHTYPE(WRITE_KERN),
- CRASHTYPE(WRITE_OPD),
- CRASHTYPE(REFCOUNT_INC_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_DEC_ZERO),
- CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_INC_ZERO),
- CRASHTYPE(REFCOUNT_ADD_ZERO),
- CRASHTYPE(REFCOUNT_INC_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_SATURATED),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_TIMING),
- CRASHTYPE(ATOMIC_TIMING),
- CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
- CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
- CRASHTYPE(USERCOPY_STACK_FRAME_TO),
- CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
- CRASHTYPE(USERCOPY_STACK_BEYOND),
- CRASHTYPE(USERCOPY_KERNEL),
- CRASHTYPE(STACKLEAK_ERASING),
- CRASHTYPE(CFI_FORWARD_PROTO),
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
- CRASHTYPE(DOUBLE_FAULT),
+/* List of possible types for crashes that can be triggered. */
+static const struct crashtype_category *crashtype_categories[] = {
+ &bugs_crashtypes,
+ &heap_crashtypes,
+ &perms_crashtypes,
+ &refcount_crashtypes,
+ &usercopy_crashtypes,
+ &stackleak_crashtypes,
+ &cfi_crashtypes,
+ &fortify_crashtypes,
#ifdef CONFIG_PPC_64S_HASH_MMU
- CRASHTYPE(PPC_SLB_MULTIHIT),
+ &powerpc_crashtypes,
#endif
};
-
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
static struct crashpoint *lkdtm_crashpoint;
@@ -223,11 +135,16 @@ char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
{
- int i;
+ int cat, idx;
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- if (!strcmp(name, crashtypes[i].name))
- return &crashtypes[i];
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ if (!strcmp(name, crashtype->name))
+ return crashtype;
+ }
}
return NULL;
@@ -347,17 +264,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off)
{
+ int n, cat, idx;
+ ssize_t out;
char *buf;
- int i, n, out;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
- crashtypes[i].name);
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
+
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtype->name);
+ }
}
buf[n] = '\0';
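The core.c change above replaces the single flat crashtypes[] table with per-file categories that are walked both for name lookup and for the debugfs listing; a rough userspace model of the new two-level lookup (simplified names, not the kernel code):

/* crashtype_lookup_demo.c - model of the category-based find_crashtype() */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct crashtype {
	const char *name;
	void (*func)(void);
};

struct crashtype_category {
	const struct crashtype *crashtypes;
	size_t len;
};

static void do_panic(void) { puts("would PANIC"); }
static void do_bug(void)   { puts("would BUG"); }

static const struct crashtype bug_entries[] = {
	{ "PANIC", do_panic },
	{ "BUG",   do_bug },
};

static const struct crashtype_category bugs = { bug_entries, 2 };
static const struct crashtype_category *categories[] = { &bugs };

static const struct crashtype *find_crashtype(const char *name)
{
	size_t cat, idx;

	/* walk every category, then every entry inside it */
	for (cat = 0; cat < sizeof(categories) / sizeof(categories[0]); cat++)
		for (idx = 0; idx < categories[cat]->len; idx++)
			if (!strcmp(name, categories[cat]->crashtypes[idx].name))
				return &categories[cat]->crashtypes[idx];
	return NULL;
}

int main(void)
{
	const struct crashtype *ct = find_crashtype("BUG");

	if (ct)
		ct->func();
	return 0;
}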
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index ab33bb5e2e7a..080293fa3c52 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,7 +10,7 @@
static volatile int fortify_scratch_space;
-void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFIED_OBJECT(void)
{
struct target {
char a[10];
@@ -31,7 +31,7 @@ void lkdtm_FORTIFIED_OBJECT(void)
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFIED_SUBOBJECT(void)
{
struct target {
char a[10];
@@ -67,7 +67,7 @@ void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFIED_STRSCPY(void)
{
char *src;
char dst[5];
@@ -134,3 +134,14 @@ void lkdtm_FORTIFIED_STRSCPY(void)
kfree(src);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(FORTIFIED_OBJECT),
+ CRASHTYPE(FORTIFIED_SUBOBJECT),
+ CRASHTYPE(FORTIFIED_STRSCPY),
+};
+
+struct crashtype_category fortify_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 8a92f5a800fa..62516078a619 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -22,8 +22,11 @@ static volatile int __offset = 1;
/*
* If there aren't guard pages, it's likely that a consecutive allocation will
* let us overflow into the second allocation without overwriting something real.
+ *
+ * This should always be caught because there is an unconditional unmapped
+ * page after vmap allocations.
*/
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
char *one, *two;
@@ -41,8 +44,11 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
+ *
+ * This should get caught by memory tagging, by KASAN, or by using
+ * CONFIG_SLUB_DEBUG=y with slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
-void lkdtm_SLAB_LINEAR_OVERFLOW(void)
+static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -50,11 +56,12 @@ void lkdtm_SLAB_LINEAR_OVERFLOW(void)
return;
pr_info("Attempting slab linear overflow ...\n");
+ OPTIMIZER_HIDE_VAR(data);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
-void lkdtm_WRITE_AFTER_FREE(void)
+static void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
@@ -80,7 +87,7 @@ void lkdtm_WRITE_AFTER_FREE(void)
pr_info("Hmm, didn't get the same memory range.\n");
}
-void lkdtm_READ_AFTER_FREE(void)
+static void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
@@ -124,7 +131,7 @@ void lkdtm_READ_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
@@ -144,7 +151,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
schedule();
}
-void lkdtm_READ_BUDDY_AFTER_FREE(void)
+static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
@@ -181,7 +188,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_SLAB_INIT_ON_ALLOC(void)
+static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -213,7 +220,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void)
kfree(val);
}
-void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -246,7 +253,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void)
free_page((unsigned long)val);
}
-void lkdtm_SLAB_FREE_DOUBLE(void)
+static void lkdtm_SLAB_FREE_DOUBLE(void)
{
int *val;
@@ -263,7 +270,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void)
kmem_cache_free(double_free_cache, val);
}
-void lkdtm_SLAB_FREE_CROSS(void)
+static void lkdtm_SLAB_FREE_CROSS(void)
{
int *val;
@@ -279,7 +286,7 @@ void lkdtm_SLAB_FREE_CROSS(void)
kmem_cache_free(b_cache, val);
}
-void lkdtm_SLAB_FREE_PAGE(void)
+static void lkdtm_SLAB_FREE_PAGE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
@@ -313,3 +320,22 @@ void __exit lkdtm_heap_exit(void)
kmem_cache_destroy(a_cache);
kmem_cache_destroy(b_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+ CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_INIT_ON_ALLOC),
+ CRASHTYPE(BUDDY_INIT_ON_ALLOC),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
+};
+
+struct crashtype_category heap_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 305fc2ec3f25..015e0484026b 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -9,19 +9,19 @@
extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) \
pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
lkdtm_kernel_info); \
else \
pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
lkdtm_kernel_info); \
-}
+} while (0)
#ifndef MODULE
int lkdtm_check_bool_cmdline(const char *param);
#define pr_expected_config_param(kconfig, param) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
@@ -52,119 +52,49 @@ int lkdtm_check_bool_cmdline(const char *param);
break; \
} \
} \
-}
+} while (0)
#else
#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
#endif
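The pr_expected_config()/pr_expected_config_param() change above is the standard do { ... } while (0) hardening for multi-statement macros; a tiny standalone illustration (generic names, not the kernel macros) of the if/else hazard it closes:

/* do_while_macro_demo.c - why multi-statement macros need do { } while (0) */
#include <stdio.h>

/* Bare-braces form: "if (v) REPORT_BAD(v); else ..." does not compile,
 * because the trailing ';' after the braces terminates the if statement
 * and leaves the else orphaned. */
#define REPORT_BAD(x)  { printf("bad: %d\n", (x)); printf("(unguarded)\n"); }

/* do/while(0) form: the expansion plus the caller's ';' is one statement,
 * so it nests correctly inside if/else. */
#define REPORT_GOOD(x) do { \
	printf("good: %d\n", (x)); \
	printf("(guarded)\n"); \
} while (0)

int main(void)
{
	int v = 1;

	if (v)
		REPORT_GOOD(v);
	else
		puts("zero");

	return 0;
}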
-/* bugs.c */
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Category's collection of crashtypes. */
+struct crashtype_category {
+ struct crashtype *crashtypes;
+ size_t len;
+};
+
+/* Each category's crashtypes list. */
+extern struct crashtype_category bugs_crashtypes;
+extern struct crashtype_category heap_crashtypes;
+extern struct crashtype_category perms_crashtypes;
+extern struct crashtype_category refcount_crashtypes;
+extern struct crashtype_category usercopy_crashtypes;
+extern struct crashtype_category stackleak_crashtypes;
+extern struct crashtype_category cfi_crashtypes;
+extern struct crashtype_category fortify_crashtypes;
+extern struct crashtype_category powerpc_crashtypes;
+
+/* Each category's init/exit routines. */
void __init lkdtm_bugs_init(int *recur_param);
-void lkdtm_PANIC(void);
-void lkdtm_BUG(void);
-void lkdtm_WARNING(void);
-void lkdtm_WARNING_MESSAGE(void);
-void lkdtm_EXCEPTION(void);
-void lkdtm_LOOP(void);
-void lkdtm_EXHAUST_STACK(void);
-void lkdtm_CORRUPT_STACK(void);
-void lkdtm_CORRUPT_STACK_STRONG(void);
-void lkdtm_REPORT_STACK(void);
-void lkdtm_REPORT_STACK_CANARY(void);
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
-void lkdtm_SOFTLOCKUP(void);
-void lkdtm_HARDLOCKUP(void);
-void lkdtm_SPINLOCKUP(void);
-void lkdtm_HUNG_TASK(void);
-void lkdtm_OVERFLOW_SIGNED(void);
-void lkdtm_OVERFLOW_UNSIGNED(void);
-void lkdtm_ARRAY_BOUNDS(void);
-void lkdtm_CORRUPT_LIST_ADD(void);
-void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_STACK_GUARD_PAGE_LEADING(void);
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
-void lkdtm_UNSET_SMEP(void);
-void lkdtm_DOUBLE_FAULT(void);
-void lkdtm_CORRUPT_PAC(void);
-
-/* heap.c */
void __init lkdtm_heap_init(void);
void __exit lkdtm_heap_exit(void);
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void);
-void lkdtm_SLAB_LINEAR_OVERFLOW(void);
-void lkdtm_WRITE_AFTER_FREE(void);
-void lkdtm_READ_AFTER_FREE(void);
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
-void lkdtm_READ_BUDDY_AFTER_FREE(void);
-void lkdtm_SLAB_INIT_ON_ALLOC(void);
-void lkdtm_BUDDY_INIT_ON_ALLOC(void);
-void lkdtm_SLAB_FREE_DOUBLE(void);
-void lkdtm_SLAB_FREE_CROSS(void);
-void lkdtm_SLAB_FREE_PAGE(void);
-
-/* perms.c */
void __init lkdtm_perms_init(void);
-void lkdtm_WRITE_RO(void);
-void lkdtm_WRITE_RO_AFTER_INIT(void);
-void lkdtm_WRITE_KERN(void);
-void lkdtm_WRITE_OPD(void);
-void lkdtm_EXEC_DATA(void);
-void lkdtm_EXEC_STACK(void);
-void lkdtm_EXEC_KMALLOC(void);
-void lkdtm_EXEC_VMALLOC(void);
-void lkdtm_EXEC_RODATA(void);
-void lkdtm_EXEC_USERSPACE(void);
-void lkdtm_EXEC_NULL(void);
-void lkdtm_ACCESS_USERSPACE(void);
-void lkdtm_ACCESS_NULL(void);
-
-/* refcount.c */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_DEC_ZERO(void);
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_INC_ZERO(void);
-void lkdtm_REFCOUNT_ADD_ZERO(void);
-void lkdtm_REFCOUNT_INC_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_SATURATED(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_TIMING(void);
-void lkdtm_ATOMIC_TIMING(void);
-
-/* rodata.c */
-void lkdtm_rodata_do_nothing(void);
-
-/* usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
-void lkdtm_USERCOPY_STACK_FRAME_TO(void);
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
-void lkdtm_USERCOPY_STACK_BEYOND(void);
-void lkdtm_USERCOPY_KERNEL(void);
-
-/* stackleak.c */
-void lkdtm_STACKLEAK_ERASING(void);
-
-/* cfi.c */
-void lkdtm_CFI_FORWARD_PROTO(void);
-/* fortify.c */
-void lkdtm_FORTIFIED_OBJECT(void);
-void lkdtm_FORTIFIED_SUBOBJECT(void);
-void lkdtm_FORTIFIED_STRSCPY(void);
-
-/* powerpc.c */
-void lkdtm_PPC_SLB_MULTIHIT(void);
+/* Special declaration for function-in-rodata. */
+void lkdtm_rodata_do_nothing(void);
#endif
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 2c6aba3ff32b..b93404d65650 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -103,7 +103,7 @@ static void execute_user_location(void *dst)
pr_err("FAIL: func returned\n");
}
-void lkdtm_WRITE_RO(void)
+static void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test and make volatile. */
volatile unsigned long *ptr = (unsigned long *)&rodata;
@@ -113,7 +113,7 @@ void lkdtm_WRITE_RO(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_RO_AFTER_INIT(void)
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
volatile unsigned long *ptr = &ro_after_init;
@@ -132,7 +132,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_KERN(void)
+static void lkdtm_WRITE_KERN(void)
{
size_t size;
volatile unsigned char *ptr;
@@ -149,7 +149,7 @@ void lkdtm_WRITE_KERN(void)
do_overwritten();
}
-void lkdtm_WRITE_OPD(void)
+static void lkdtm_WRITE_OPD(void)
{
size_t size = sizeof(func_desc_t);
void (*func)(void) = do_nothing;
@@ -166,38 +166,38 @@ void lkdtm_WRITE_OPD(void)
func();
}
-void lkdtm_EXEC_DATA(void)
+static void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
-void lkdtm_EXEC_STACK(void)
+static void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
-void lkdtm_EXEC_KMALLOC(void)
+static void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
-void lkdtm_EXEC_VMALLOC(void)
+static void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
-void lkdtm_EXEC_RODATA(void)
+static void lkdtm_EXEC_RODATA(void)
{
execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
CODE_AS_IS);
}
-void lkdtm_EXEC_USERSPACE(void)
+static void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
@@ -212,12 +212,12 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_EXEC_NULL(void)
+static void lkdtm_EXEC_NULL(void)
{
execute_location(NULL, CODE_AS_IS);
}
-void lkdtm_ACCESS_USERSPACE(void)
+static void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
@@ -250,7 +250,7 @@ void lkdtm_ACCESS_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_ACCESS_NULL(void)
+static void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
volatile unsigned long *ptr = (unsigned long *)NULL;
@@ -270,3 +270,24 @@ void __init lkdtm_perms_init(void)
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(WRITE_OPD),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
index 077c9f9ed8d0..be385449911a 100644
--- a/drivers/misc/lkdtm/powerpc.c
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void)
preempt_enable();
}
-void lkdtm_PPC_SLB_MULTIHIT(void)
+static void lkdtm_PPC_SLB_MULTIHIT(void)
{
if (!radix_enabled()) {
pr_info("Injecting SLB multihit errors\n");
@@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void)
pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
}
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index de7c5ab528d9..5cd488f54cfa 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref)
* A refcount_inc() above the maximum value of the refcount implementation,
* should at least saturate, and at most also WARN.
*/
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void)
}
/* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
}
/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
}
/* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref)
* zero it should either saturate (when inc-from-zero isn't protected)
* or stay at zero (when inc-from-zero is protected) and should WARN for both.
*/
-void lkdtm_REFCOUNT_DEC_ZERO(void)
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(2);
@@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start)
}
/* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
* A refcount_dec_and_test() should act like refcount_dec() above when
* going negative.
*/
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
* A refcount_sub_and_test() should act like refcount_dec_and_test()
* above when going negative.
*/
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(3);
@@ -203,7 +203,7 @@ static void check_from_zero(refcount_t *ref)
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
*/
-void lkdtm_REFCOUNT_INC_ZERO(void)
+static void lkdtm_REFCOUNT_INC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -228,7 +228,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void)
* A refcount_add() should act like refcount_inc() above when starting
* at zero.
*/
-void lkdtm_REFCOUNT_ADD_ZERO(void)
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -267,7 +267,7 @@ static void check_saturated(refcount_t *ref)
* A refcount_inc() from a saturated value should at most warn about
* being saturated already.
*/
-void lkdtm_REFCOUNT_INC_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -278,7 +278,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -289,7 +289,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -300,7 +300,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -312,7 +312,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -324,7 +324,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -336,7 +336,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -348,7 +348,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
}
/* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
+static void lkdtm_ATOMIC_TIMING(void)
{
unsigned int i;
atomic_t count = ATOMIC_INIT(1);
@@ -373,7 +373,7 @@ void lkdtm_ATOMIC_TIMING(void)
* cd /sys/kernel/debug/provoke-crash
* perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
*/
-void lkdtm_REFCOUNT_TIMING(void)
+static void lkdtm_REFCOUNT_TIMING(void)
{
unsigned int i;
refcount_t count = REFCOUNT_INIT(1);
@@ -390,3 +390,30 @@ void lkdtm_REFCOUNT_TIMING(void)
else
pr_info("refcount timing: done\n");
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index 82369c6f889e..025b133297a6 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -115,7 +115,7 @@ out:
}
}
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
unsigned long flags;
@@ -124,7 +124,7 @@ void lkdtm_STACKLEAK_ERASING(void)
local_irq_restore(flags);
}
#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
@@ -133,3 +133,12 @@ void lkdtm_STACKLEAK_ERASING(void)
}
}
#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(STACKLEAK_ERASING),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9161ce7ed47a..6215ec995cd3 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
*/
#include "lkdtm.h"
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
@@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
- return stack + 0;
+ return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
- unsigned char buf[32];
+ unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}
- return trick_compiler(buf);
+ /*
+	 * Put the target buffer in the middle of the stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -119,7 +131,7 @@ free_user:
* This checks for whole-object size validation with hardened usercopy,
* with or without usercopy whitelisting.
*/
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
@@ -185,9 +197,9 @@ free_kernel:
/*
* This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
*/
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
{
unsigned long user_alloc;
unsigned char *buf = NULL;
@@ -261,42 +273,42 @@ free_alloc:
}
/* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
- do_usercopy_heap_size(true);
+ do_usercopy_slab_size(true);
}
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
- do_usercopy_heap_size(false);
+ do_usercopy_slab_size(false);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
- do_usercopy_heap_whitelist(true);
+ do_usercopy_slab_whitelist(true);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
- do_usercopy_heap_whitelist(false);
+ do_usercopy_slab_whitelist(false);
}
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
-void lkdtm_USERCOPY_STACK_BEYOND(void)
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
-void lkdtm_USERCOPY_KERNEL(void)
+static void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
@@ -330,6 +342,86 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+ unsigned long uaddr;
+
+ uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (uaddr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ /* Initialize contents. */
+ memset(kaddr, 0xAA, PAGE_SIZE);
+
+ /* Bump the kaddr forward to detect a page-spanning overflow. */
+ kaddr += PAGE_SIZE / 2;
+
+ pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + (PAGE_SIZE / 2))) {
+ pr_err("copy_to_user() failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+ pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+ goto free_user;
+ }
+
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+ void *addr;
+
+ addr = vmalloc(PAGE_SIZE);
+ if (!addr) {
+ pr_err("vmalloc() failed!?\n");
+ return;
+ }
+ do_usercopy_page_span("vmalloc", addr);
+ vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+ struct folio *folio;
+ void *addr;
+
+ /*
+ * FIXME: Folio checking currently misses 0-order allocations, so
+ * allocate and bump forward to the last page.
+ */
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!folio) {
+ pr_err("folio_alloc() failed!?\n");
+ return;
+ }
+ addr = folio_address(folio);
+ if (addr)
+ do_usercopy_page_span("folio", addr + PAGE_SIZE);
+ else
+ pr_err("folio_address() failed?!\n");
+ folio_put(folio);
+}
+
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(whitelist_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+ CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_VMALLOC),
+ CRASHTYPE(USERCOPY_FOLIO),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
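
The new USERCOPY_VMALLOC and USERCOPY_FOLIO crash types above are exercised from user space like the other LKDTM tests. A minimal sketch, assuming CONFIG_LKDTM is enabled and debugfs is mounted at /sys/kernel/debug (the provoke-crash/DIRECT path is the usual LKDTM trigger interface):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Writing a crash type name to DIRECT runs that test immediately. */
	static const char cmd[] = "USERCOPY_VMALLOC";
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, sizeof(cmd) - 1) < 0)
		perror("write");
	close(fd);
	return 0;
}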
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index ec2a4fce8581..e889a8bd7ac8 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -784,7 +784,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
{
struct device *base = data;
- if (strcmp(dev->driver->name, "i915") ||
+ if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_HDCP)
return 0;
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index f7380d387bab..5c39457e3f53 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -131,7 +131,7 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
{
struct device *base = data;
- if (strcmp(dev->driver->name, "i915") ||
+ if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_PXP)
return 0;
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 4b8f1c7d726d..049a12006348 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event)
{
struct pvpanic_instance *pi_cur;
- spin_lock(&pvpanic_lock);
+ if (!spin_trylock(&pvpanic_lock))
+ return;
+
list_for_each_entry(pi_cur, &pvpanic_list, list) {
if (event & pi_cur->capability & pi_cur->events)
iowrite8(event, pi_cur->base);
@@ -55,9 +57,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused
return NOTIFY_DONE;
}
+/*
+ * Call our notifier very early on panic, deferring the
+ * action taken to the hypervisor.
+ */
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper() */
+ .priority = INT_MAX,
};
static void pvpanic_remove(void *param)
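
The spin_trylock() conversion above matters because pvpanic_send_event() can now be reached from the panic notifier: another CPU may hold pvpanic_lock and will never release it once the system is panicking. A minimal sketch of the pattern, with illustrative names not taken from the driver:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_panic_path_event(void)
{
	/* Never spin on a panic path; bail out if the lock is contended. */
	if (!spin_trylock(&example_lock))
		return;

	/* ... walk shared state and notify the hypervisor ... */

	spin_unlock(&example_lock);
}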
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index f1d8ba6d4857..086ce77d9074 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1452,10 +1452,10 @@ static void vmballoon_reset(struct vmballoon *b)
error = vmballoon_vmci_init(b);
if (error)
- pr_err("failed to initialize vmci doorbell\n");
+ pr_err_once("failed to initialize vmci doorbell\n");
if (vmballoon_send_guest_id(b))
- pr_err("failed to send guest ID to the host\n");
+ pr_err_once("failed to send guest ID to the host\n");
unlock:
up_write(&b->conf_sem);
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
index 605794aadf11..b6d4d7fd686a 100644
--- a/drivers/misc/vmw_vmci/Kconfig
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -5,7 +5,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI
+ depends on (X86 || ARM64) && !CPU_BIG_ENDIAN && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 6cf3e21c7604..172696abce31 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -665,9 +665,8 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
struct vmci_ctx *context;
- struct vmci_handle_list *notifier, *tmp;
+ struct vmci_handle_list *notifier = NULL, *iter, *tmp;
struct vmci_handle handle;
- bool found = false;
context = vmci_ctx_get(context_id);
if (!context)
@@ -676,23 +675,23 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
spin_lock(&context->lock);
- list_for_each_entry_safe(notifier, tmp,
+ list_for_each_entry_safe(iter, tmp,
&context->notifier_list, node) {
- if (vmci_handle_is_equal(notifier->handle, handle)) {
- list_del_rcu(&notifier->node);
+ if (vmci_handle_is_equal(iter->handle, handle)) {
+ list_del_rcu(&iter->node);
context->n_notifiers--;
- found = true;
+ notifier = iter;
break;
}
}
spin_unlock(&context->lock);
- if (found)
+ if (notifier)
kvfree_rcu(notifier);
vmci_ctx_put(context);
- return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+ return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}
static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 57a6157209a1..aa7b05de97dd 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -614,6 +614,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
}
if (!mmio_base) {
+ if (IS_ENABLED(CONFIG_ARM64)) {
+ dev_err(&pdev->dev, "MMIO base is invalid\n");
+ return -ENXIO;
+ }
error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (error) {
dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 94ebf7f3fd58..8f2de1893245 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -2577,6 +2577,12 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
if (result < VMCI_SUCCESS)
return result;
+ /*
+ * This virt_wmb() ensures that data written to the queue
+ * is observable before the new producer_tail is.
+ */
+ virt_wmb();
+
vmci_q_header_add_producer_tail(produce_q->q_header, written,
produce_q_size);
return written;
@@ -2620,6 +2626,12 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
if (buf_ready < VMCI_SUCCESS)
return (ssize_t) buf_ready;
+ /*
+ * This virt_rmb() ensures that data from the queue will be read
+ * after we have determined how much is ready to be consumed.
+ */
+ virt_rmb();
+
read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
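
The two barriers added above implement the usual publish/consume ordering for a shared-memory ring: the producer must make the payload visible before advancing the tail, and the consumer must not read payload bytes before it has read how much is ready. A standalone sketch under those assumptions (the struct and field names are invented for illustration):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/minmax.h>
#include <asm/barrier.h>

struct example_ring {
	u8 *buf;
	size_t head;
	size_t tail;
};

static void example_produce(struct example_ring *r, const void *data, size_t len)
{
	memcpy(r->buf + r->tail, data, len);	/* write the payload first */
	virt_wmb();				/* payload visible before the new tail */
	r->tail += len;				/* then publish the new tail */
}

static size_t example_consume(struct example_ring *r, void *data, size_t len)
{
	size_t avail = r->tail - r->head;	/* how much is ready */

	virt_rmb();				/* read the count before payload bytes */
	len = min(len, avail);
	memcpy(data, r->buf + r->head, len);
	r->head += len;
	return len;
}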
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1259ca22d625..f4a1281658db 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
- else
- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 316393c694d7..0db9490dc659 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -31,10 +31,10 @@
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/sizes.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 1499a64ec3aa..f13c08db3da5 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
struct sdhci_host *host = slot->host;
u16 clock;
+ if (host->mmc->ios.power_mode != MMC_POWER_ON)
+ return 0;
+
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock |= SDHCI_CLOCK_PLL_EN;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7d96758a8f04..1749dbbacc13 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -16,8 +16,6 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <mach/hardware.h>
-
#include <asm/mach/flash.h>
#define CACHELINESIZE 32
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..0ee452275578 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@ out:
return e;
}
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper function checks whether there are enough free PEBs (after
+ * deducting fastmap PEBs) to fill fm_pool and fm_wl_pool; this rule applies
+ * once at least one free PEB has been filled into fm_wl_pool.
+ * For the wear-leveling pool, UBI should also reserve free PEBs for bad-PEB
+ * handling, because there may not be enough free PEBs left for user volumes
+ * after new bad PEBs are produced.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+ int fm_used = 0; // fastmap non anchor pebs.
+ int beb_rsvd_pebs;
+
+ if (!ubi->free.rb_node)
+ return false;
+
+ beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+ if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
- if (ubi->fm_next_anchor) {
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->free_count++;
- }
- /* All available PEBs are in ubi->free, now is the time to get
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!ubi->free.rb_node)
+ if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -253,6 +275,58 @@ out:
return ret;
}
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size)
+ return NULL;
+
+ pnum = pool->pebs[pool->used];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether a wear-leveling work needs to be triggered.
+ * UBI fetches free PEBs from wl_pool; we check free PEBs in both 'wl_pool'
+ * and 'ubi->free', because a free PEB in the 'ubi->free' tree may be moved
+ * into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+ int ec;
+ struct ubi_wl_entry *e;
+
+ if (!ubi->used.rb_node)
+ return false;
+
+ e = next_peb_for_wl(ubi);
+ if (!e) {
+ if (!ubi->free.rb_node)
+ return false;
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = e->ec;
+ } else {
+ ec = e->ec;
+ if (ubi->free.rb_node) {
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = max(ec, e->ec);
+ }
+ }
+ e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+ return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we have a next anchor? */
- if (!ubi->fm_next_anchor) {
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (!ubi->fm_next_anchor)
- /* Tell wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
}
- /* Do wear leveling to get a new anchor PEB or check the
- * existing next anchor candidate.
- */
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
- if (ubi->fm_next_anchor) {
- return_unused_peb(ubi, ubi->fm_next_anchor);
- ubi->fm_next_anchor = NULL;
- }
-
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
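
The has_enough_free_count() rule introduced above can be read as plain arithmetic: free PEBs, minus the PEBs reserved for bad-PEB handling when filling the wear-leveling pool, must exceed the PEBs a fastmap itself occupies. A standalone sketch of that rule (parameter names are invented; the patch reads the same values from struct ubi_device):

#include <linux/types.h>

static bool example_has_enough_free(int free_count, int beb_rsvd_pebs,
				    int fm_size, int leb_size,
				    bool is_wl_pool, bool fm_in_use)
{
	/* Non-anchor PEBs a fastmap occupies, if fastmap is in use. */
	int fm_used = fm_in_use ? fm_size / leb_size - 1 : 0;
	/* The bad-PEB reserve only matters when filling the WL pool. */
	int reserved = is_wl_pool ? beb_rsvd_pebs : 0;

	return free_count - reserved > fm_used;
}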
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6b5f1ffd961b..6e95c4b1473e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
- if (ubi->fm_next_anchor) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
- fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
- set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
- fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
- free_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..078112e23dfd 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -489,8 +489,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
- struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1bc7b3a05604..6ea95ade4ca6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -309,7 +309,6 @@ out_mapping:
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..55bae06cf408 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!next_peb_for_wl(ubi) ||
+#else
if (!ubi->free.rb_node ||
+#endif
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
- if (e1 && ubi->fm_next_anchor &&
- (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
- /* fm_next_anchor is no longer considered a good anchor
- * candidate.
+ /*
+ * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->fm_next_anchor = NULL;
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -1003,8 +1007,6 @@ out_cancel:
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
- struct ubi_wl_entry *e1;
- struct ubi_wl_entry *e2;
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* the WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!need_wear_leveling(ubi))
+ goto out_unlock;
+#else
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+
if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
+#endif
dbg_wl("schedule wear-leveling");
} else
dbg_wl("schedule scrubbing");
@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
- /* Abort anchor production, if needed it will be
+ /*
+ * Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
- ubi->fm_next_anchor = e;
+ ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index c93a53293786..5ebe374a08ae 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,6 +5,8 @@
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
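
The need_wear_leveling() helper declared above boils down to one comparison: if the erase counter of the candidate free PEB exceeds that of the least-worn used PEB by UBI_WL_THRESHOLD or more, wear leveling is worth scheduling. The decision in isolation (illustrative only):

#include <linux/types.h>

static bool example_wl_needed(int candidate_ec, int least_used_ec,
			      int wl_threshold)
{
	/* Trigger once the wear gap reaches the configured threshold. */
	return candidate_ec - least_used_ec >= wl_threshold;
}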
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index fbb32aa49b24..48cf344750ff 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1603,12 +1603,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
- return -ENOSPC;
-
*idx = find_first_bit(free_bins, dev->num_arl_bins);
-
- return -ENOENT;
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
static int b53_arl_op(struct b53_device *dev, int op, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 7b37d45bc9fb..d94150d8f3f4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 ctrl, u16 status, u16 lpa,
+ u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
+ state->link = false;
+
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
/* The Spped and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
- * still get here on link up. But we want to set an_complete
- * only if AN was enabled, thus we look at BMCR_ANENABLE.
- * (According to 802.3-2008 section 22.2.4.2.10, we should be
- * able to get this same value from BMSR_ANEGCAPABLE, but tests
- * show that these Marvell PHYs don't conform to this part of
- * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+ * still get here on link up.
*/
- state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &ctrl);
+ MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
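
The serdes changes above switch from the BMCR to the BMSR for link reporting: BMSR_LSTATUS is latched low, so a link drop since the last read is reported, and BMSR_ANEGCOMPLETE is valid whether or not autonegotiation was enabled. A minimal sketch of that interpretation using the generic MII bit definitions ("phy_status_link" stands in for the Marvell PHY status bit):

#include <linux/mii.h>
#include <linux/phylink.h>

static void example_fill_link_state(u16 bmsr, bool phy_status_link,
				    struct phylink_link_state *state)
{
	state->link = false;

	/* Latched-low link bit: any drop since the last read shows here. */
	if (!(bmsr & BMSR_LSTATUS))
		return;

	state->link = phy_status_link;
	state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
}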
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 3bb42a9f236d..769f672e9128 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
return 0;
}
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
- phy_interface_t interface)
-{
- int ext_int;
-
- ext_int = rtl8365mb_extint_port_map[port];
-
- if (ext_int < 0 &&
- (interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL ||
- interface == PHY_INTERFACE_MODE_GMII))
- /* Internal PHY */
- return true;
- else if ((ext_int >= 1) &&
- phy_interface_mode_is_rgmii(interface))
- /* Extension MAC */
- return true;
-
- return false;
-}
-
static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port))
+ if (dsa_is_user_port(ds, port)) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
- else if (dsa_is_cpu_port(ds, port))
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ } else if (dsa_is_cpu_port(ds, port)) {
phy_interface_set_rgmii(config->supported_interfaces);
+ }
config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
@@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
struct realtek_priv *priv = ds->priv;
int ret;
- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a3816264c35c..8c5828582c21 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a3593290886f..4d46780fad13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3272aca496dc..47fc8e6963d5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2180,13 +2180,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
return -EOPNOTSUPP;
- /* All filters are already in use, we cannot match more rules */
- if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
- RXCHK_BRCM_TAG_MAX)
- return -ENOSPC;
-
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index >= RXCHK_BRCM_TAG_MAX)
+ /* All filters are already in use, we cannot match more rules */
return -ENOSPC;
/* Location is the classification ID, and index is the position
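
Several hunks in this series (b53, bcmsysport, otx2, qed) replace bitmap_weight() checks with the cheaper direct question: find_first_zero_bit() already tells you whether a free slot exists, and bitmap_empty() already tells you whether any bit is set. A sketch of the filter-slot idiom (illustrative helper, not taken from any of the drivers):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static int example_claim_filter_slot(unsigned long *filters, unsigned int max)
{
	unsigned int index = find_first_zero_bit(filters, max);

	/* All filters are already in use; no need to count every set bit. */
	if (index >= max)
		return -ENOSPC;

	set_bit(index, filters);
	return index;
}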
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7f11c0a8e7a9..d4e63f0644c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 54f235c216a9..2dd192b5e4e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -355,7 +355,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +438,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- bitmap_weight(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index fe3472e04c23..9106c359e64c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b3b3c079a0fa..59c9a10f83ba 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ else
+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
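
The mtk_max_lro_buf_alloc() helper above exists because page-fragment allocators (napi_alloc_frag()/netdev_alloc_frag()) only serve requests up to PAGE_SIZE, while MTK_MAX_LRO_RX_LENGTH frames need more. A sketch of the resulting allocation split (illustrative wrapper, not the driver's code):

#include <linux/gfp.h>
#include <linux/skbuff.h>

static void *example_rx_buf_alloc(unsigned int frag_size, gfp_t gfp)
{
	if (frag_size <= PAGE_SIZE)
		return napi_alloc_frag(frag_size);

	/* Larger LRO buffers fall back to a multi-page allocation. */
	return (void *)__get_free_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					get_order(frag_size));
}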
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e10b7b04b894..c56d2194cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port, err;
+ int p, port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port;
+ int p, port;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
-
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
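
The mlx4 refactor above drops the hand-rolled min/max port computation and simply walks the set bits of the active-port bitmap. The pattern in isolation (illustrative helper):

#include <linux/bitops.h>

static void example_visit_active_ports(unsigned long *ports, int num_ports)
{
	int p;

	for_each_set_bit(p, ports, num_ports) {
		int port = p + 1;	/* hardware port numbers are 1-based */

		/* ... per-port work ... */
		(void)port;
	}
}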
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 552b6e26e701..2a8fc547eb37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- struct lag_tracker tracker;
+ struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
int i;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 443a5d6eb57b..7c31a46195b2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv6);
}
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
map[FLOW_PAY_GRE] = key_size;
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
}
- if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
- map[FLOW_PAY_QINQ] = key_size;
- key_size += sizeof(struct nfp_flower_vlan);
- }
-
if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
(in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
map[FLOW_PAY_UDP_TUN] = key_size;
@@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
offset = key_map[FLOW_PAY_GRE];
key = kdata + offset;
@@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
- offset = key_map[FLOW_PAY_QINQ];
- key = kdata + offset;
- msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
- (struct nfp_flower_vlan *)msk,
- rules[i]);
- }
- }
-
if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
offset = key_map[FLOW_PAY_UDP_TUN];
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 193a167a6762..e01430139b6d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
- (struct nfp_flower_vlan *)msk,
- rule);
- ext += sizeof(struct nfp_flower_vlan);
- msk += sizeof(struct nfp_flower_vlan);
- }
-
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 54af30961351..6eeeb0fda91f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -15,7 +15,7 @@
#include "nfp_net_sriov.h"
static int
-nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
+nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn)
{
u16 cap_vf;
@@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);
if ((cap_vf & cap) != cap) {
- nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
+ if (warn)
+ nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
return -EOPNOTSUPP;
}
if (vf < 0 || vf >= app->pf->num_vfs) {
- nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
+ if (warn)
+ nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
return -EINVAL;
}
@@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
unsigned int vf_offset;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true);
if (err)
return err;
@@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u32 vlan_tag;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true);
if (err)
return err;
@@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
}
/* Check if fw supports or not */
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true);
if (err)
is_proto_sup = false;
@@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf,
u32 vf_offset, ratevalue;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true);
if (err)
return err;
@@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF,
- "spoofchk");
+ "spoofchk", true);
if (err)
return err;
@@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
- "trust");
+ "trust", true);
if (err)
return err;
@@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE,
- "link_state");
+ "link_state", true);
if (err)
return err;
@@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
u8 flags;
int err;
- err = nfp_net_sriov_check(app, vf, 0, "");
+ err = nfp_net_sriov_check(app, vf, 0, "", true);
if (err)
return err;
@@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
- if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false))
ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false);
if (!err) {
rate = readl(app->pf->vfcfg_tbl2 + vf_offset +
NFP_NET_VF_CFG_RATE);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 23b668de4640..69b0ede75cae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -319,44 +319,27 @@ free_rdma_dev:
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, bool check)
{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
+ unsigned int bit, weight, nbits;
+ unsigned long *b;
+
+ if (!check)
+ goto end;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ if (!weight)
goto end;
DP_NOTICE(p_hwfn,
"%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
bmap->name, bmap->max_count, weight);
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ for (bit = 0; bit < bmap->max_count; bit += 512) {
+ b = bmap->bitmap + BITS_TO_LONGS(bit);
+ nbits = min(bmap->max_count - bit, 512U);
+
+ if (!bitmap_empty(b, nbits))
DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ "line 0x%04x: %*pb\n", bit / 512, nbits, b);
}
end:
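
The qed_rdma_bmap_free() rewrite above relies on the printk "%*pb" extension, where the field width gives the number of valid bits, so each log line renders one 512-bit chunk of the map. A standalone sketch with a plain pr_notice() (illustrative only):

#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/printk.h>

static void example_dump_bitmap(unsigned long *map, unsigned int max_count)
{
	unsigned int bit, nbits;

	for (bit = 0; bit < max_count; bit += 512) {
		unsigned long *b = map + BITS_TO_LONGS(bit);

		nbits = min(max_count - bit, 512U);
		if (!bitmap_empty(b, nbits))
			pr_notice("line 0x%04x: %*pb\n", bit / 512, nbits, b);
	}
}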
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 071b4aeaddf2..134ecfca96a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -76,7 +76,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* We delay for a short while if an async destroy QP is still expected.
* Beyond the added delay we clear the bitmap anyway.
*/
- while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
/* If the HW device is during recovery, all resources are
* immediately reset without receiving a per-cid indication
* from HW. In this case we don't expect the cid bitmap to be
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdca00c0dc1f..d55f59ce4a31 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -441,7 +441,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
* .bind which is called before usbnet sets up dev->maxpacket
*/
if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
- val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ val % usb_maxpacket(dev->udev, dev->out) == 0)
val++;
/* we might need to flush any pending tx buffers if running */
@@ -465,7 +465,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
usbnet_update_max_qlen(dev);
/* never pad more than 3 full USB packets per transfer */
- ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+ ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out),
CDC_NCM_MIN_TX_PKT, ctx->tx_max);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 636a405844c5..3226ab33afae 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -4421,7 +4421,7 @@ static int lan78xx_probe(struct usb_interface *intf,
goto out4;
period = ep_intr->desc.bInterval;
- maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
buf = kmalloc(maxp, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -4439,7 +4439,7 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
- dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 4e70dec30e5a..f79333fe1783 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -333,7 +333,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
net->hard_header_len += sizeof (struct rndis_data_hdr);
dev->hard_mtu = net->mtu + net->hard_header_len;
- dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
netif_dbg(dev, probe, dev->net,
"dev->maxpacket can't be 0\n");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 36b24ec11650..1cb6dab3e2d0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -229,7 +229,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
pipe = usb_rcvintpipe (dev->udev,
dev->status->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- maxp = usb_maxpacket (dev->udev, pipe, 0);
+ maxp = usb_maxpacket(dev->udev, pipe);
/* avoid 1 msec chatter: min 8 msec poll rate */
period = max ((int) dev->status->desc.bInterval,
@@ -1789,7 +1789,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
- dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
/* that is a broken device */
status = -ENODEV;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index a85e192c9d59..1bb92ca7451b 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1068,7 +1068,7 @@ int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
if (usb->data_len < 32)
usb->data_len = 32;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 74c3d8cb3100..0827bc860bf8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -586,10 +586,10 @@ static void rt2x00usb_assign_endpoint(struct data_queue *queue,
if (queue->qid == QID_RX) {
pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
} else {
pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
}
if (!queue->usb_maxpacket)
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
index 2b92c227efbc..d35dd940d968 100644
--- a/drivers/net/wireless/silabs/wfx/hif_tx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
@@ -280,7 +280,7 @@ int wfx_hif_stop_scan(struct wfx_vif *wvif)
}
int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
- struct ieee80211_channel *channel, const u8 *ssid, int ssidlen)
+ struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
{
int ret;
struct wfx_hif_msg *hif;
@@ -288,8 +288,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
- WARN_ON(sizeof(body->ssid) < ssidlen);
- WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+ WARN_ON(sizeof(body->ssid) < ssid_len);
+ WARN(!conf->ibss_joined && !ssid_len, "joining an unknown BSS");
if (!hif)
return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
@@ -300,8 +300,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
if (ssid) {
- body->ssid_length = cpu_to_le32(ssidlen);
- memcpy(body->ssid, ssid, ssidlen);
+ body->ssid_length = cpu_to_le32(ssid_len);
+ memcpy(body->ssid, ssid, ssid_len);
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index bbfd3fa51921..e015bfb8d221 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -170,7 +170,7 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
*
* The PDS file is an array of Time-Length-Value structs.
*/
- int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret, chunk_type, chunk_len, chunk_num = 0;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index e551fa284a43..329d7f4a2b2e 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -409,8 +409,8 @@ static void wfx_join(struct wfx_vif *wvif)
struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct cfg80211_bss *bss = NULL;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- const u8 *ssidie = NULL;
- int ssidlen = 0;
+ const u8 *ssid_ie = NULL;
+ int ssid_len = 0;
int ret;
wfx_tx_lock_flush(wvif->wdev);
@@ -422,21 +422,21 @@ static void wfx_join(struct wfx_vif *wvif)
return;
}
- rcu_read_lock(); /* protect ssidie */
+ rcu_read_lock(); /* protect ssid_ie */
if (bss)
- ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssidie) {
- ssidlen = ssidie[1];
- if (ssidlen > IEEE80211_MAX_SSID_LEN)
- ssidlen = IEEE80211_MAX_SSID_LEN;
- memcpy(ssid, &ssidie[2], ssidlen);
+ ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssid_ie) {
+ ssid_len = ssid_ie[1];
+ if (ssid_len > IEEE80211_MAX_SSID_LEN)
+ ssid_len = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssid_ie[2], ssid_len);
}
rcu_read_unlock();
cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
wvif->join_in_progress = true;
- ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
+ ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len);
if (ret) {
ieee80211_connection_loss(vif);
wfx_reset(wvif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 65ab907aca5a..8c0b9546d5a2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1386,7 +1386,7 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
- (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i]);
queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = INVALID_GRANT_REF;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
@@ -1418,8 +1418,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
* foreign access is ended (which may be deferred).
*/
get_page(page);
- gnttab_end_foreign_access(ref,
- (unsigned long)page_address(page));
+ gnttab_end_foreign_access(ref, page);
queue->grant_rx_ref[id] = INVALID_GRANT_REF;
kfree_skb(skb);
@@ -1760,7 +1759,7 @@ static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
if (ref != INVALID_GRANT_REF)
- gnttab_end_foreign_access(ref, (unsigned long)page);
+ gnttab_end_foreign_access(ref, virt_to_page(page));
}
static void xennet_disconnect_backend(struct netfront_info *info)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..24165daee3c8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
+ rq->end_io = nvme_keep_alive_end_io;
rq->end_io_data = ctrl;
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(rq, false);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2227,8 +2228,16 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- ctrl->ctrl_config |= NVME_CC_ENABLE;
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+ /* Flush write to device (required if transport is PCI) */
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
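
The NVMe hunks in this series all follow the same block-layer API change: the completion callback is now stored on the request (rq->end_io) instead of being passed to blk_execute_rq_nowait(). A sketch of the calling convention under that assumption (illustrative driver code; signatures follow the hunks above):

#include <linux/blk-mq.h>

static void example_end_io(struct request *rq, blk_status_t error)
{
	/* Complete the driver-private context stashed in rq->end_io_data. */
}

static void example_submit_async(struct request *rq, void *priv)
{
	rq->end_io = example_end_io;
	rq->end_io_data = priv;
	blk_execute_rq_nowait(rq, false);	/* at_head == false */
}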
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7ae72c7a211b..3c778bb0c294 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1899,6 +1899,24 @@ nvme_fc_ctrl_ioerr_work(struct work_struct *work)
nvme_fc_error_recovery(ctrl, "transport detected io error");
}
+/*
+ * nvme_fc_io_getuuid - Routine called by the LLDD to get the appid field
+ * associated with a request
+ * @req: IO request from nvme-fc to the driver
+ * Returns: the UUID if there is an appid associated with the VM, or
+ * NULL if the user/libvirt has not set an appid for the VM
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ return NULL;
+ return blkcg_get_fc_appid(rq->bio);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
+
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 096b1b47d750..a2e89db1cd63 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->end_io = nvme_uring_cmd_end_io;
req->end_io_data = ioucmd;
/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
- blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+ blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a98a7de0964..48f4f6eb877b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
nvme_init_request(abort_req, &cmd);
+ abort_req->end_io = abort_endio;
abort_req->end_io_data = NULL;
abort_req->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(abort_req, false, abort_endio);
+ blk_execute_rq_nowait(abort_req, false);
/*
* The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return PTR_ERR(req);
nvme_init_request(req, &cmd);
+ if (opcode == nvme_admin_delete_cq)
+ req->end_io = nvme_del_cq_end;
+ else
+ req->end_io = nvme_del_queue_end;
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
req->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
- nvme_del_cq_end : nvme_del_queue_end);
+ blk_execute_rq_nowait(req, false);
return 0;
}
@@ -3453,6 +3457,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..b1f7efab3918 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -97,7 +97,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
id->sgls |= cpu_to_le32(1 << 20);
/*
- * When passsthru controller is setup using nvme-loop transport it will
+ * When passthru controller is setup using nvme-loop transport it will
* export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
* the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
* code path with duplicate ctr subsynqn. In order to prevent that we
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
req->p.rq = rq;
queue_work(nvmet_wq, &req->p.work);
} else {
+ rq->end_io = nvmet_passthru_req_done;
rq->end_io_data = req;
- blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+ blk_execute_rq_nowait(rq, false);
}
if (ns)
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 555aa77a574d..967d0084800e 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -304,6 +304,7 @@ config NVMEM_LAYERSCAPE_SFP
tristate "Layerscape SFP (Security Fuse Processor) support"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
This driver provides support to read the eFuses on Freescale
Layerscape SoC's. For example, the vendor provides a per part
@@ -324,4 +325,16 @@ config NVMEM_SUNPLUS_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-sunplus-ocotp.
+config NVMEM_APPLE_EFUSES
+ tristate "Apple eFuse support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to enable support for reading eFuses on Apple SoCs
+ such as the M1. These are used, for example, to store
+ factory-programmed calibration data required for the PCIe or
+ USB-C PHYs.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-apple-efuses.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 891958e29d25..00e136a0a123 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -65,3 +65,5 @@ obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
nvmem-layerscape-sfp-y := layerscape-sfp.o
obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
new file mode 100644
index 000000000000..9b7c87102104
--- /dev/null
+++ b/drivers/nvmem/apple-efuses.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC eFuse driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct apple_efuses_priv {
+ void __iomem *fuses;
+};
+
+static int apple_efuses_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct apple_efuses_priv *priv = context;
+ u32 *dst = val;
+
+ while (bytes >= sizeof(u32)) {
+ *dst++ = readl_relaxed(priv->fuses + offset);
+ bytes -= sizeof(u32);
+ offset += sizeof(u32);
+ }
+
+ return 0;
+}
+
+static int apple_efuses_probe(struct platform_device *pdev)
+{
+ struct apple_efuses_priv *priv;
+ struct resource *res;
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .read_only = true,
+ .reg_read = apple_efuses_read,
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .name = "apple_efuses_nvmem",
+ .id = NVMEM_DEVID_AUTO,
+ .root_only = true,
+ };
+
+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->fuses))
+ return PTR_ERR(priv->fuses);
+
+ config.priv = priv;
+ config.size = resource_size(res);
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id apple_efuses_of_match[] = {
+ { .compatible = "apple,efuses", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
+
+static struct platform_driver apple_efuses_driver = {
+ .driver = {
+ .name = "apple_efuses",
+ .of_match_table = apple_efuses_of_match,
+ },
+ .probe = apple_efuses_probe,
+};
+
+module_platform_driver(apple_efuses_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
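
For context, a sketch of how a consumer driver (for example a PHY driver) might read calibration data exposed by an nvmem provider such as the one added above; the cell name "fuse-calibration" and the surrounding driver are hypothetical:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>

	static int my_phy_read_cal(struct device *dev, u32 *cal)
	{
		struct nvmem_cell *cell;
		size_t len;
		u32 *val;

		cell = nvmem_cell_get(dev, "fuse-calibration");
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		val = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(val))
			return PTR_ERR(val);

		if (len < sizeof(*cal)) {
			kfree(val);
			return -EINVAL;
		}

		*cal = *val;		/* first 32-bit word of the cell */
		kfree(val);
		return 0;
	}
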
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index a8097511582a..dfea96c52463 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -244,7 +244,7 @@ static const struct of_device_id bcm_otpc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
-static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
{ .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
{ .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
{ /* sentinel */ }
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 439f00b9eef6..450b927691c3 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -72,6 +73,7 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
return -ENOMEM;
priv->cells[idx].offset = value - (char *)data;
priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
}
return 0;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f58d9bc7aa08..1e3c754efd0d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
+ cell->np = info->np;
if (cell->nbits)
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
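
The core.c change above propagates the new np member of struct nvmem_cell_info into the cell entry, which is what lets the brcm_nvram hunk attach a device-tree node to each discovered cell. A hedged provider-side sketch; the parent node, cell name and offsets are illustrative:

	#include <linux/nvmem-provider.h>
	#include <linux/of.h>

	static void my_fill_cell(struct device_node *parent,
				 struct nvmem_cell_info *info)
	{
		info->name = "macaddr";
		info->offset = 0x10;
		info->bytes = 6;
		/* lets consumers reference the cell via its DT node */
		info->np = of_get_child_by_name(parent, info->name);
	}
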
diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c
index e591c1511e33..e2b424561949 100644
--- a/drivers/nvmem/layerscape-sfp.c
+++ b/drivers/nvmem/layerscape-sfp.c
@@ -13,15 +13,17 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200
struct layerscape_sfp_priv {
- void __iomem *base;
+ struct regmap *regmap;
};
struct layerscape_sfp_data {
int size;
+ enum regmap_endian endian;
};
static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
@@ -29,15 +31,16 @@ static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
{
struct layerscape_sfp_priv *priv = context;
- memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset,
- bytes);
-
- return 0;
+ return regmap_bulk_read(priv->regmap,
+ LAYERSCAPE_SFP_OTP_OFFSET + offset, val,
+ bytes / 4);
}
static struct nvmem_config layerscape_sfp_nvmem_config = {
.name = "fsl-sfp",
.reg_read = layerscape_sfp_read,
+ .word_size = 4,
+ .stride = 4,
};
static int layerscape_sfp_probe(struct platform_device *pdev)
@@ -45,16 +48,26 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
const struct layerscape_sfp_data *data;
struct layerscape_sfp_priv *priv;
struct nvmem_device *nvmem;
+ struct regmap_config config = { 0 };
+ void __iomem *base;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
data = device_get_match_data(&pdev->dev);
+ config.reg_bits = 32;
+ config.reg_stride = 4;
+ config.val_bits = 32;
+ config.val_format_endian = data->endian;
+ config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4;
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
layerscape_sfp_nvmem_config.size = data->size;
layerscape_sfp_nvmem_config.dev = &pdev->dev;
@@ -65,11 +78,18 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(nvmem);
}
+static const struct layerscape_sfp_data ls1021a_data = {
+ .size = 0x88,
+ .endian = REGMAP_ENDIAN_BIG,
+};
+
static const struct layerscape_sfp_data ls1028a_data = {
.size = 0x88,
+ .endian = REGMAP_ENDIAN_LITTLE,
};
static const struct of_device_id layerscape_sfp_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data },
{ .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
{},
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 162132c7dab9..c1e893c8a247 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -217,9 +217,8 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
goto err_clk_rate_set;
}
- ret = pm_runtime_get_sync(priv->dev);
+ ret = pm_runtime_resume_and_get(priv->dev);
if (ret < 0) {
- pm_runtime_put_noidle(priv->dev);
dev_err(priv->dev, "Failed to enable power-domain\n");
goto err_reg_enable;
}
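
The qfprom hunk swaps the pm_runtime_get_sync()/pm_runtime_put_noidle() pair for pm_runtime_resume_and_get(), which already drops the usage count when the resume fails. The resulting pattern, as a minimal sketch:

	#include <linux/pm_runtime.h>

	static int my_do_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)		/* usage count already dropped on failure */
			return ret;

		/* ... touch the hardware ... */

		pm_runtime_put(dev);
		return 0;
	}
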
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index 2dc59c22eb55..52b928a7a6d5 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -71,7 +71,7 @@ struct sp_ocotp_data {
int size;
};
-const struct sp_ocotp_data sp_otp_v0 = {
+static const struct sp_ocotp_data sp_otp_v0 = {
.size = QAC628_OTP_SIZE,
};
@@ -202,8 +202,6 @@ static int sp_ocotp_probe(struct platform_device *pdev)
(int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
(int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
- dev_info(dev, "by Sunplus (C) 2020");
-
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c263ffc5884a..fc804e08e3cb 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -567,31 +567,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
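
driver_set_override() (added earlier in this series in drivers/base/driver.c) centralises the length check, newline stripping, locking and kfree of the previous value. A sketch of how another, hypothetical bus's driver_override store method would use it; struct my_bus_dev and its accessor are made up:

	#include <linux/container_of.h>
	#include <linux/device.h>

	struct my_bus_dev {
		struct device dev;
		const char *driver_override;	/* const char *, as the helper requires */
	};

	#define to_my_bus_dev(d) container_of(d, struct my_bus_dev, dev)

	static ssize_t my_driver_override_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
	{
		struct my_bus_dev *mdev = to_my_bus_dev(dev);
		int ret;

		ret = driver_set_override(dev, &mdev->driver_override, buf, count);
		if (ret)
			return ret;

		return count;
	}
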
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c43267b18f55..c59ddde42007 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -50,18 +50,5 @@ sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o
pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
-pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
-pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
-pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
-pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
-pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
-pxa2xx-obj-$(CONFIG_MACH_BALLOON3) += pxa2xx_balloon3.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI) += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI320) += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_H4700) += pxa2xx_hx4700.o
-
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
-
obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
deleted file mode 100644
index 5fe1da7a50e4..000000000000
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_balloon3.c
- *
- * Balloon3 PCMCIA specific routines.
- *
- * Author: Nick Bane
- * Created: June, 2006
- * Copyright: Toby Churchill Ltd
- * Derived from pxa2xx_mainstone.c, by Nico Pitre
- *
- * Various modification by Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <mach/balloon3.h>
-
-#include <asm/mach-types.h>
-
-#include "soc_common.h"
-
-static int balloon3_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- uint16_t ver;
-
- ver = __raw_readw(BALLOON3_FPGA_VER);
- if (ver < 0x4f08)
- pr_warn("The FPGA code, version 0x%04x, is too old. "
- "PCMCIA/CF support might be broken in this version!",
- ver);
-
- skt->socket.pci_irq = BALLOON3_BP_CF_NRDY_IRQ;
- skt->stat[SOC_STAT_CD].gpio = BALLOON3_GPIO_S0_CD;
- skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
- skt->stat[SOC_STAT_BVD1].irq = BALLOON3_BP_NSTSCHG_IRQ;
- skt->stat[SOC_STAT_BVD1].name = "PCMCIA0 STSCHG";
-
- return 0;
-}
-
-static unsigned long balloon3_pcmcia_status[2] = {
- BALLOON3_CF_nSTSCHG_BVD1,
- BALLOON3_CF_nSTSCHG_BVD1
-};
-
-static void balloon3_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- uint16_t status;
- int flip;
-
- /* This actually reads the STATUS register */
- status = __raw_readw(BALLOON3_CF_STATUS_REG);
- flip = (status ^ balloon3_pcmcia_status[skt->nr])
- & BALLOON3_CF_nSTSCHG_BVD1;
- /*
- * Workaround for STSCHG which can't be deasserted:
- * We therefore disable/enable corresponding IRQs
- * as needed to avoid IRQ locks.
- */
- if (flip) {
- balloon3_pcmcia_status[skt->nr] = status;
- if (status & BALLOON3_CF_nSTSCHG_BVD1)
- enable_irq(BALLOON3_BP_NSTSCHG_IRQ);
- else
- disable_irq(BALLOON3_BP_NSTSCHG_IRQ);
- }
-
- state->ready = !!(status & BALLOON3_CF_nIRQ);
- state->bvd1 = !!(status & BALLOON3_CF_nSTSCHG_BVD1);
- state->bvd2 = 0; /* not available */
- state->vs_3v = 1; /* Always true its a CF card */
- state->vs_Xv = 0; /* not available */
-}
-
-static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- __raw_writew(BALLOON3_CF_RESET, BALLOON3_CF_CONTROL_REG +
- ((state->flags & SS_RESET) ?
- BALLOON3_FPGA_SETnCLR : 0));
- return 0;
-}
-
-static struct pcmcia_low_level balloon3_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = balloon3_pcmcia_hw_init,
- .socket_state = balloon3_pcmcia_socket_state,
- .configure_socket = balloon3_pcmcia_configure_socket,
- .first = 0,
- .nr = 1,
-};
-
-static struct platform_device *balloon3_pcmcia_device;
-
-static int __init balloon3_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_balloon3())
- return -ENODEV;
-
- balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!balloon3_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(balloon3_pcmcia_device,
- &balloon3_pcmcia_ops, sizeof(balloon3_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(balloon3_pcmcia_device);
-
- if (ret)
- platform_device_put(balloon3_pcmcia_device);
-
- return ret;
-}
-
-static void __exit balloon3_pcmcia_exit(void)
-{
- platform_device_unregister(balloon3_pcmcia_device);
-}
-
-module_init(balloon3_pcmcia_init);
-module_exit(balloon3_pcmcia_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Nick Bane <nick@cecomputing.co.uk>");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_DESCRIPTION("Balloon3 board CF/PCMCIA driver");
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index d6d2f75f8f47..0ea41f1411e5 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -23,12 +23,11 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
-#include <mach/hardware.h>
-#include <mach/smemc.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <mach/pxa2xx-regs.h>
#include <asm/mach-types.h>
#include <pcmcia/ss.h>
@@ -113,7 +112,7 @@ static inline u_int pxa2xx_pcmcia_cmd_time(u_int mem_clk_10khz,
return (300000 * (pcmcia_mcxx_asst + 1) / mem_clk_10khz);
}
-static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
+static uint32_t pxa2xx_pcmcia_mcmem(int sock, int speed, int clock)
{
uint32_t val;
@@ -124,12 +123,10 @@ static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCMEM(sock));
-
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcio(int sock, int speed, int clock)
{
uint32_t val;
@@ -140,12 +137,11 @@ static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCIO(sock));
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcatt(int sock, int speed, int clock)
{
uint32_t val;
@@ -156,31 +152,26 @@ static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCATT(sock));
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcxx(struct soc_pcmcia_socket *skt, unsigned int clk)
+static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
{
+ unsigned long clk = clk_get_rate(skt->clk) / 10000;
struct soc_pcmcia_timing timing;
int sock = skt->nr;
soc_common_pcmcia_get_timing(skt, &timing);
- pxa2xx_pcmcia_set_mcmem(sock, timing.mem, clk);
- pxa2xx_pcmcia_set_mcatt(sock, timing.attr, clk);
- pxa2xx_pcmcia_set_mcio(sock, timing.io, clk);
+ pxa_smemc_set_pcmcia_timing(sock,
+ pxa2xx_pcmcia_mcmem(sock, timing.mem, clk),
+ pxa2xx_pcmcia_mcatt(sock, timing.attr, clk),
+ pxa2xx_pcmcia_mcio(sock, timing.io, clk));
return 0;
}
-static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
-{
- unsigned long clk = clk_get_rate(skt->clk);
- return pxa2xx_pcmcia_set_mcxx(skt, clk / 10000);
-}
-
#ifdef CONFIG_CPU_FREQ
static int
@@ -215,18 +206,13 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- /*
- * We have at least one socket, so set MECR:CIT
- * (Card Is There)
- */
- uint32_t mecr = MECR_CIT;
+ int nr = 1;
- /* Set MECR:NOS (Number Of Sockets) */
if ((ops->first + ops->nr) > 1 ||
machine_is_viper() || machine_is_arcom_zeus())
- mecr |= MECR_NOS;
+ nr = 2;
- __raw_writel(mecr, MECR);
+ pxa_smemc_set_pcmcia_socket(nr);
}
EXPORT_SYMBOL(pxa2xx_configure_sockets);
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
deleted file mode 100644
index f0f725e99604..000000000000
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_colibri.c
- *
- * Driver for Toradex Colibri PXA270 CF socket
- *
- * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-
-#include <asm/mach-types.h>
-
-#include "soc_common.h"
-
-#define COLIBRI270_RESET_GPIO 53
-#define COLIBRI270_PPEN_GPIO 107
-#define COLIBRI270_BVD1_GPIO 83
-#define COLIBRI270_BVD2_GPIO 82
-#define COLIBRI270_DETECT_GPIO 84
-#define COLIBRI270_READY_GPIO 1
-
-#define COLIBRI320_RESET_GPIO 77
-#define COLIBRI320_PPEN_GPIO 57
-#define COLIBRI320_BVD1_GPIO 53
-#define COLIBRI320_BVD2_GPIO 79
-#define COLIBRI320_DETECT_GPIO 81
-#define COLIBRI320_READY_GPIO 29
-
-enum {
- DETECT = 0,
- READY = 1,
- BVD1 = 2,
- BVD2 = 3,
- PPEN = 4,
- RESET = 5,
-};
-
-/* Contents of this array are configured on-the-fly in init function */
-static struct gpio colibri_pcmcia_gpios[] = {
- { 0, GPIOF_IN, "PCMCIA Detect" },
- { 0, GPIOF_IN, "PCMCIA Ready" },
- { 0, GPIOF_IN, "PCMCIA BVD1" },
- { 0, GPIOF_IN, "PCMCIA BVD2" },
- { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" },
- { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" },
-};
-
-static int colibri_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- ret = gpio_request_array(colibri_pcmcia_gpios,
- ARRAY_SIZE(colibri_pcmcia_gpios));
- if (ret)
- goto err1;
-
- skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio);
- skt->stat[SOC_STAT_CD].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio);
- skt->stat[SOC_STAT_CD].name = "PCMCIA CD";
-
-err1:
- return ret;
-}
-
-static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free_array(colibri_pcmcia_gpios,
- ARRAY_SIZE(colibri_pcmcia_gpios));
-}
-
-static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
-
- state->detect = !!gpio_get_value(colibri_pcmcia_gpios[DETECT].gpio);
- state->ready = !!gpio_get_value(colibri_pcmcia_gpios[READY].gpio);
- state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpios[BVD1].gpio);
- state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpios[BVD2].gpio);
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int
-colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio,
- !(state->Vcc == 33 && state->Vpp < 50));
- gpio_set_value(colibri_pcmcia_gpios[RESET].gpio,
- state->flags & SS_RESET);
- return 0;
-}
-
-static struct pcmcia_low_level colibri_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .first = 0,
- .nr = 1,
-
- .hw_init = colibri_pcmcia_hw_init,
- .hw_shutdown = colibri_pcmcia_hw_shutdown,
-
- .socket_state = colibri_pcmcia_socket_state,
- .configure_socket = colibri_pcmcia_configure_socket,
-};
-
-static struct platform_device *colibri_pcmcia_device;
-
-static int __init colibri_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_colibri() && !machine_is_colibri320())
- return -ENODEV;
-
- colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!colibri_pcmcia_device)
- return -ENOMEM;
-
- /* Colibri PXA270 */
- if (machine_is_colibri()) {
- colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO;
- colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO;
- colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO;
- colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO;
- colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO;
- colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO;
- /* Colibri PXA320 */
- } else if (machine_is_colibri320()) {
- colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO;
- colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO;
- colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO;
- colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO;
- colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO;
- colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO;
- }
-
- ret = platform_device_add_data(colibri_pcmcia_device,
- &colibri_pcmcia_ops, sizeof(colibri_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(colibri_pcmcia_device);
-
- if (ret)
- platform_device_put(colibri_pcmcia_device);
-
- return ret;
-}
-
-static void __exit colibri_pcmcia_exit(void)
-{
- platform_device_unregister(colibri_pcmcia_device);
-}
-
-module_init(colibri_pcmcia_init);
-module_exit(colibri_pcmcia_exit);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PCMCIA support for Toradex Colibri PXA270/PXA320");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
deleted file mode 100644
index 72caa6d05ab9..000000000000
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Toshiba e740 PCMCIA specific routines.
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <mach/eseries-gpio.h>
-
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include "soc_common.h"
-
-static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- if (skt->nr == 0) {
- skt->stat[SOC_STAT_CD].gpio = GPIO_E740_PCMCIA_CD0;
- skt->stat[SOC_STAT_CD].name = "CF card detect";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_E740_PCMCIA_RDY0;
- skt->stat[SOC_STAT_RDY].name = "CF ready";
- } else {
- skt->stat[SOC_STAT_CD].gpio = GPIO_E740_PCMCIA_CD1;
- skt->stat[SOC_STAT_CD].name = "Wifi switch";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_E740_PCMCIA_RDY1;
- skt->stat[SOC_STAT_RDY].name = "Wifi ready";
- }
-
- return 0;
-}
-
-static void e740_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int e740_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- if (state->flags & SS_RESET) {
- if (skt->nr == 0)
- gpio_set_value(GPIO_E740_PCMCIA_RST0, 1);
- else
- gpio_set_value(GPIO_E740_PCMCIA_RST1, 1);
- } else {
- if (skt->nr == 0)
- gpio_set_value(GPIO_E740_PCMCIA_RST0, 0);
- else
- gpio_set_value(GPIO_E740_PCMCIA_RST1, 0);
- }
-
- switch (state->Vcc) {
- case 0: /* Socket off */
- if (skt->nr == 0)
- gpio_set_value(GPIO_E740_PCMCIA_PWR0, 0);
- else
- gpio_set_value(GPIO_E740_PCMCIA_PWR1, 1);
- break;
- case 50:
- case 33: /* socket on */
- if (skt->nr == 0)
- gpio_set_value(GPIO_E740_PCMCIA_PWR0, 1);
- else
- gpio_set_value(GPIO_E740_PCMCIA_PWR1, 0);
- break;
- default:
- printk(KERN_ERR "e740_cs: Unsupported Vcc: %d\n", state->Vcc);
- }
-
- return 0;
-}
-
-static struct pcmcia_low_level e740_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = e740_pcmcia_hw_init,
- .socket_state = e740_pcmcia_socket_state,
- .configure_socket = e740_pcmcia_configure_socket,
- .nr = 2,
-};
-
-static struct platform_device *e740_pcmcia_device;
-
-static int __init e740_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_e740())
- return -ENODEV;
-
- e740_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!e740_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(e740_pcmcia_device, &e740_pcmcia_ops,
- sizeof(e740_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(e740_pcmcia_device);
-
- if (ret)
- platform_device_put(e740_pcmcia_device);
-
- return ret;
-}
-
-static void __exit e740_pcmcia_exit(void)
-{
- platform_device_unregister(e740_pcmcia_device);
-}
-
-module_init(e740_pcmcia_init);
-module_exit(e740_pcmcia_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_DESCRIPTION("e740 PCMCIA platform support");
diff --git a/drivers/pcmcia/pxa2xx_hx4700.c b/drivers/pcmcia/pxa2xx_hx4700.c
deleted file mode 100644
index 87b6a1639d94..000000000000
--- a/drivers/pcmcia/pxa2xx_hx4700.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-
-#include <asm/mach-types.h>
-#include <mach/hx4700.h>
-
-#include "soc_common.h"
-
-static struct gpio gpios[] = {
- { GPIO114_HX4700_CF_RESET, GPIOF_OUT_INIT_LOW, "CF reset" },
- { EGPIO4_CF_3V3_ON, GPIOF_OUT_INIT_LOW, "CF 3.3V enable" },
-};
-
-static int hx4700_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- ret = gpio_request_array(gpios, ARRAY_SIZE(gpios));
- if (ret)
- goto out;
-
- /*
- * IRQ type must be set before soc_pcmcia_hw_init() calls request_irq().
- * The asic3 default IRQ type is level trigger low level detect, exactly
- * the the signal present on GPIOD4_CF_nCD when a CF card is inserted.
- * If the IRQ type is not changed, the asic3 interrupt handler will loop
- * repeatedly because it is unable to clear the level trigger interrupt.
- */
- irq_set_irq_type(gpio_to_irq(GPIOD4_CF_nCD), IRQ_TYPE_EDGE_BOTH);
-
- skt->stat[SOC_STAT_CD].gpio = GPIOD4_CF_nCD;
- skt->stat[SOC_STAT_CD].name = "PCMCIA CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO60_HX4700_CF_RNB;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";
-
-out:
- return ret;
-}
-
-static void hx4700_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free_array(gpios, ARRAY_SIZE(gpios));
-}
-
-static void hx4700_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int hx4700_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- switch (state->Vcc) {
- case 0:
- gpio_set_value(EGPIO4_CF_3V3_ON, 0);
- break;
- case 33:
- gpio_set_value(EGPIO4_CF_3V3_ON, 1);
- break;
- default:
- printk(KERN_ERR "pcmcia: Unsupported Vcc: %d\n", state->Vcc);
- return -EINVAL;
- }
-
- gpio_set_value(GPIO114_HX4700_CF_RESET, (state->flags & SS_RESET) != 0);
-
- return 0;
-}
-
-static struct pcmcia_low_level hx4700_pcmcia_ops = {
- .owner = THIS_MODULE,
- .nr = 1,
- .hw_init = hx4700_pcmcia_hw_init,
- .hw_shutdown = hx4700_pcmcia_hw_shutdown,
- .socket_state = hx4700_pcmcia_socket_state,
- .configure_socket = hx4700_pcmcia_configure_socket,
-};
-
-static struct platform_device *hx4700_pcmcia_device;
-
-static int __init hx4700_pcmcia_init(void)
-{
- struct platform_device *pdev;
-
- if (!machine_is_h4700())
- return -ENODEV;
-
- pdev = platform_device_register_data(NULL, "pxa2xx-pcmcia", -1,
- &hx4700_pcmcia_ops, sizeof(hx4700_pcmcia_ops));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- hx4700_pcmcia_device = pdev;
-
- return 0;
-}
-
-static void __exit hx4700_pcmcia_exit(void)
-{
- platform_device_unregister(hx4700_pcmcia_device);
-}
-
-module_init(hx4700_pcmcia_init);
-module_exit(hx4700_pcmcia_exit);
-
-MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
-MODULE_DESCRIPTION("HP iPAQ hx4700 PCMCIA driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
deleted file mode 100644
index cfff41ac9ca2..000000000000
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_palmld.c
- *
- * Driver for Palm LifeDrive PCMCIA
- *
- * Copyright (C) 2006 Alex Osborne <ato@meshy.org>
- * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <asm/mach-types.h>
-#include <mach/palmld.h>
-#include "soc_common.h"
-
-static struct gpio palmld_pcmcia_gpios[] = {
- { GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" },
- { GPIO_NR_PALMLD_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
-};
-
-static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- ret = gpio_request_array(palmld_pcmcia_gpios,
- ARRAY_SIZE(palmld_pcmcia_gpios));
-
- skt->stat[SOC_STAT_RDY].gpio = GPIO_NR_PALMLD_PCMCIA_READY;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";
-
- return ret;
-}
-
-static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios));
-}
-
-static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->detect = 1; /* always inserted */
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- gpio_set_value(GPIO_NR_PALMLD_PCMCIA_POWER, 1);
- gpio_set_value(GPIO_NR_PALMLD_PCMCIA_RESET,
- !!(state->flags & SS_RESET));
-
- return 0;
-}
-
-static struct pcmcia_low_level palmld_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .first = 1,
- .nr = 1,
-
- .hw_init = palmld_pcmcia_hw_init,
- .hw_shutdown = palmld_pcmcia_hw_shutdown,
-
- .socket_state = palmld_pcmcia_socket_state,
- .configure_socket = palmld_pcmcia_configure_socket,
-};
-
-static struct platform_device *palmld_pcmcia_device;
-
-static int __init palmld_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_palmld())
- return -ENODEV;
-
- palmld_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!palmld_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(palmld_pcmcia_device, &palmld_pcmcia_ops,
- sizeof(palmld_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(palmld_pcmcia_device);
-
- if (ret)
- platform_device_put(palmld_pcmcia_device);
-
- return ret;
-}
-
-static void __exit palmld_pcmcia_exit(void)
-{
- platform_device_unregister(palmld_pcmcia_device);
-}
-
-module_init(palmld_pcmcia_init);
-module_exit(palmld_pcmcia_exit);
-
-MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
- " Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PCMCIA support for Palm LifeDrive");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
deleted file mode 100644
index 8fe05613ed04..000000000000
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_palmtc.c
- *
- * Driver for Palm Tungsten|C PCMCIA
- *
- * Copyright (C) 2008 Alex Osborne <ato@meshy.org>
- * Copyright (C) 2009-2011 Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-
-#include <asm/mach-types.h>
-#include <mach/palmtc.h>
-#include "soc_common.h"
-
-static struct gpio palmtc_pcmcia_gpios[] = {
- { GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
- { GPIO_NR_PALMTC_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" },
- { GPIO_NR_PALMTC_PCMCIA_POWER3, GPIOF_INIT_LOW, "PCMCIA Power 3" },
- { GPIO_NR_PALMTC_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
- { GPIO_NR_PALMTC_PCMCIA_PWRREADY, GPIOF_IN, "PCMCIA Power Ready" },
-};
-
-static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- ret = gpio_request_array(palmtc_pcmcia_gpios,
- ARRAY_SIZE(palmtc_pcmcia_gpios));
-
- skt->stat[SOC_STAT_RDY].gpio = GPIO_NR_PALMTC_PCMCIA_READY;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";
-
- return ret;
-}
-
-static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free_array(palmtc_pcmcia_gpios, ARRAY_SIZE(palmtc_pcmcia_gpios));
-}
-
-static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->detect = 1; /* always inserted */
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int palmtc_wifi_powerdown(void)
-{
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
- mdelay(40);
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
- return 0;
-}
-
-static int palmtc_wifi_powerup(void)
-{
- int timeout = 50;
-
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1);
- mdelay(50);
-
- /* Power up the card, 1.8V first, after a while 3.3V */
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1);
- mdelay(100);
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1);
-
- /* Wait till the card is ready */
- while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) &&
- timeout) {
- mdelay(1);
- timeout--;
- }
-
- /* Power down the WiFi in case of error */
- if (!timeout) {
- palmtc_wifi_powerdown();
- return 1;
- }
-
- /* Reset the card */
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
- mdelay(20);
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0);
- mdelay(25);
-
- gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
-
- return 0;
-}
-
-static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- int ret = 1;
-
- if (state->Vcc == 0)
- ret = palmtc_wifi_powerdown();
- else if (state->Vcc == 33)
- ret = palmtc_wifi_powerup();
-
- return ret;
-}
-
-static struct pcmcia_low_level palmtc_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .first = 0,
- .nr = 1,
-
- .hw_init = palmtc_pcmcia_hw_init,
- .hw_shutdown = palmtc_pcmcia_hw_shutdown,
-
- .socket_state = palmtc_pcmcia_socket_state,
- .configure_socket = palmtc_pcmcia_configure_socket,
-};
-
-static struct platform_device *palmtc_pcmcia_device;
-
-static int __init palmtc_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_palmtc())
- return -ENODEV;
-
- palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!palmtc_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops,
- sizeof(palmtc_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(palmtc_pcmcia_device);
-
- if (ret)
- platform_device_put(palmtc_pcmcia_device);
-
- return ret;
-}
-
-static void __exit palmtc_pcmcia_exit(void)
-{
- platform_device_unregister(palmtc_pcmcia_device);
-}
-
-module_init(palmtc_pcmcia_init);
-module_exit(palmtc_pcmcia_exit);
-
-MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
- " Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
deleted file mode 100644
index c449ca72cb87..000000000000
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_palmtx.c
- *
- * Driver for Palm T|X PCMCIA
- *
- * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <asm/mach-types.h>
-#include <mach/palmtx.h>
-#include "soc_common.h"
-
-static struct gpio palmtx_pcmcia_gpios[] = {
- { GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
- { GPIO_NR_PALMTX_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" },
- { GPIO_NR_PALMTX_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
-};
-
-static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- ret = gpio_request_array(palmtx_pcmcia_gpios,
- ARRAY_SIZE(palmtx_pcmcia_gpios));
-
- skt->stat[SOC_STAT_RDY].gpio = GPIO_NR_PALMTX_PCMCIA_READY;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";
-
- return ret;
-}
-
-static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free_array(palmtx_pcmcia_gpios, ARRAY_SIZE(palmtx_pcmcia_gpios));
-}
-
-static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->detect = 1; /* always inserted */
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int
-palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
- gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
- gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
- !!(state->flags & SS_RESET));
-
- return 0;
-}
-
-static struct pcmcia_low_level palmtx_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .first = 0,
- .nr = 1,
-
- .hw_init = palmtx_pcmcia_hw_init,
- .hw_shutdown = palmtx_pcmcia_hw_shutdown,
-
- .socket_state = palmtx_pcmcia_socket_state,
- .configure_socket = palmtx_pcmcia_configure_socket,
-};
-
-static struct platform_device *palmtx_pcmcia_device;
-
-static int __init palmtx_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_palmtx())
- return -ENODEV;
-
- palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!palmtx_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
- sizeof(palmtx_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(palmtx_pcmcia_device);
-
- if (ret)
- platform_device_put(palmtx_pcmcia_device);
-
- return ret;
-}
-
-static void __exit palmtx_pcmcia_exit(void)
-{
- platform_device_unregister(palmtx_pcmcia_device);
-}
-
-module_init(palmtx_pcmcia_init);
-module_exit(palmtx_pcmcia_exit);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 5fdd25a9e28e..b3ba858f70cb 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -15,11 +15,10 @@
#include <linux/platform_device.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/scoop.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
#define NO_KEEP_VS 0x0001
#define SCOOP_DEV platform_scoop_config->devs
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
deleted file mode 100644
index 6db8fe880ed4..000000000000
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ /dev/null
@@ -1,200 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_trizeps4.c
- *
- * TRIZEPS PCMCIA specific routines.
- *
- * Author: Jürgen Schindele
- * Created: 20 02, 2006
- * Copyright: Jürgen Schindele
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach-types.h>
-#include <asm/irq.h>
-
-#include <mach/pxa2xx-regs.h>
-#include <mach/trizeps4.h>
-
-#include "soc_common.h"
-
-extern void board_pcmcia_power(int power);
-
-static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- /* we dont have voltage/card/ready detection
- * so we dont need interrupts for it
- */
- switch (skt->nr) {
- case 0:
- skt->stat[SOC_STAT_CD].gpio = GPIO_PCD;
- skt->stat[SOC_STAT_CD].name = "cs0_cd";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_PRDY;
- skt->stat[SOC_STAT_RDY].name = "cs0_rdy";
- break;
- default:
- break;
- }
- /* release the reset of this card */
- pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq);
-
- return 0;
-}
-
-static unsigned long trizeps_pcmcia_status[2];
-
-static void trizeps_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- unsigned short status = 0, change;
- status = CFSR_readw();
- change = (status ^ trizeps_pcmcia_status[skt->nr]) &
- ConXS_CFSR_BVD_MASK;
- if (change) {
- trizeps_pcmcia_status[skt->nr] = status;
- if (status & ConXS_CFSR_BVD1) {
- /* enable_irq empty */
- } else {
- /* disable_irq empty */
- }
- }
-
- switch (skt->nr) {
- case 0:
- /* just fill in fix states */
- state->bvd1 = (status & ConXS_CFSR_BVD1) ? 1 : 0;
- state->bvd2 = (status & ConXS_CFSR_BVD2) ? 1 : 0;
- state->vs_3v = (status & ConXS_CFSR_VS1) ? 0 : 1;
- state->vs_Xv = (status & ConXS_CFSR_VS2) ? 0 : 1;
- break;
-
-#ifndef CONFIG_MACH_TRIZEPS_CONXS
- /* on ConXS we only have one slot. Second is inactive */
- case 1:
- state->detect = 0;
- state->ready = 0;
- state->bvd1 = 0;
- state->bvd2 = 0;
- state->vs_3v = 0;
- state->vs_Xv = 0;
- break;
-
-#endif
- }
-}
-
-static int trizeps_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- int ret = 0;
- unsigned short power = 0;
-
- /* we do nothing here just check a bit */
- switch (state->Vcc) {
- case 0: power &= 0xfc; break;
- case 33: power |= ConXS_BCR_S0_VCC_3V3; break;
- case 50:
- pr_err("%s(): Vcc 5V not supported in socket\n", __func__);
- break;
- default:
- pr_err("%s(): bad Vcc %u\n", __func__, state->Vcc);
- ret = -1;
- }
-
- switch (state->Vpp) {
- case 0: power &= 0xf3; break;
- case 33: power |= ConXS_BCR_S0_VPP_3V3; break;
- case 120:
- pr_err("%s(): Vpp 12V not supported in socket\n", __func__);
- break;
- default:
- if (state->Vpp != state->Vcc) {
- pr_err("%s(): bad Vpp %u\n", __func__, state->Vpp);
- ret = -1;
- }
- }
-
- switch (skt->nr) {
- case 0: /* we only have 3.3V */
- board_pcmcia_power(power);
- break;
-
-#ifndef CONFIG_MACH_TRIZEPS_CONXS
- /* on ConXS we only have one slot. Second is inactive */
- case 1:
-#endif
- default:
- break;
- }
-
- return ret;
-}
-
-static void trizeps_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
- /* default is on */
- board_pcmcia_power(0x9);
-}
-
-static void trizeps_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
- board_pcmcia_power(0x0);
-}
-
-static struct pcmcia_low_level trizeps_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = trizeps_pcmcia_hw_init,
- .socket_state = trizeps_pcmcia_socket_state,
- .configure_socket = trizeps_pcmcia_configure_socket,
- .socket_init = trizeps_pcmcia_socket_init,
- .socket_suspend = trizeps_pcmcia_socket_suspend,
-#ifdef CONFIG_MACH_TRIZEPS_CONXS
- .nr = 1,
-#else
- .nr = 2,
-#endif
- .first = 0,
-};
-
-static struct platform_device *trizeps_pcmcia_device;
-
-static int __init trizeps_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_trizeps4() && !machine_is_trizeps4wl())
- return -ENODEV;
-
- trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!trizeps_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(trizeps_pcmcia_device,
- &trizeps_pcmcia_ops, sizeof(trizeps_pcmcia_ops));
-
- if (ret == 0)
- ret = platform_device_add(trizeps_pcmcia_device);
-
- if (ret)
- platform_device_put(trizeps_pcmcia_device);
-
- return ret;
-}
-
-static void __exit trizeps_pcmcia_exit(void)
-{
- platform_device_unregister(trizeps_pcmcia_device);
-}
-
-fs_initcall(trizeps_pcmcia_init);
-module_exit(trizeps_pcmcia_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Juergen Schindele");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
deleted file mode 100644
index 7ac6647d286e..000000000000
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Viper/Zeus PCMCIA support
- * Copyright 2004 Arcom Control Systems
- *
- * Maintained by Marc Zyngier <maz@misterjones.org>
- *
- * Based on:
- * iPAQ h2200 PCMCIA support
- * Copyright 2004 Koen Kooi <koen@vestingbar.nl>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <pcmcia/ss.h>
-
-#include <asm/irq.h>
-
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
-
-#include "soc_common.h"
-#include "pxa2xx_base.h"
-
-static struct platform_device *arcom_pcmcia_dev;
-
-static inline struct arcom_pcmcia_pdata *viper_get_pdata(void)
-{
- return arcom_pcmcia_dev->dev.platform_data;
-}
-
-static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
- unsigned long flags;
-
- skt->stat[SOC_STAT_CD].gpio = pdata->cd_gpio;
- skt->stat[SOC_STAT_CD].name = "PCMCIA_CD";
- skt->stat[SOC_STAT_RDY].gpio = pdata->rdy_gpio;
- skt->stat[SOC_STAT_RDY].name = "CF ready";
-
- if (gpio_request(pdata->pwr_gpio, "CF power"))
- goto err_request_pwr;
-
- local_irq_save(flags);
-
- if (gpio_direction_output(pdata->pwr_gpio, 0)) {
- local_irq_restore(flags);
- goto err_dir;
- }
-
- local_irq_restore(flags);
-
- return 0;
-
-err_dir:
- gpio_free(pdata->pwr_gpio);
-err_request_pwr:
- dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n");
- return -1;
-}
-
-/*
- * Release all resources.
- */
-static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
-
- gpio_free(pdata->pwr_gpio);
-}
-
-static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 1; /* Can only apply 3.3V */
- state->vs_Xv = 0;
-}
-
-static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
-
- /* Silently ignore Vpp, output enable, speaker enable. */
- pdata->reset(state->flags & SS_RESET);
-
- /* Apply socket voltage */
- switch (state->Vcc) {
- case 0:
- gpio_set_value(pdata->pwr_gpio, 0);
- break;
- case 33:
- gpio_set_value(pdata->pwr_gpio, 1);
- break;
- default:
- dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc);
- return -1;
- }
-
- return 0;
-}
-
-static struct pcmcia_low_level viper_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = viper_pcmcia_hw_init,
- .hw_shutdown = viper_pcmcia_hw_shutdown,
- .socket_state = viper_pcmcia_socket_state,
- .configure_socket = viper_pcmcia_configure_socket,
- .nr = 1,
-};
-
-static struct platform_device *viper_pcmcia_device;
-
-static int viper_pcmcia_probe(struct platform_device *pdev)
-{
- int ret;
-
- /* I can't imagine more than one device, but you never know... */
- if (arcom_pcmcia_dev)
- return -EEXIST;
-
- if (!pdev->dev.platform_data)
- return -EINVAL;
-
- viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!viper_pcmcia_device)
- return -ENOMEM;
-
- arcom_pcmcia_dev = pdev;
-
- viper_pcmcia_device->dev.parent = &pdev->dev;
-
- ret = platform_device_add_data(viper_pcmcia_device,
- &viper_pcmcia_ops,
- sizeof(viper_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(viper_pcmcia_device);
-
- if (ret) {
- platform_device_put(viper_pcmcia_device);
- arcom_pcmcia_dev = NULL;
- }
-
- return ret;
-}
-
-static int viper_pcmcia_remove(struct platform_device *pdev)
-{
- platform_device_unregister(viper_pcmcia_device);
- arcom_pcmcia_dev = NULL;
- return 0;
-}
-
-static struct platform_device_id viper_pcmcia_id_table[] = {
- { .name = "viper-pcmcia", },
- { .name = "zeus-pcmcia", },
- { },
-};
-
-static struct platform_driver viper_pcmcia_driver = {
- .probe = viper_pcmcia_probe,
- .remove = viper_pcmcia_remove,
- .driver = {
- .name = "arcom-pcmcia",
- },
- .id_table = viper_pcmcia_id_table,
-};
-
-module_platform_driver(viper_pcmcia_driver);
-
-MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
deleted file mode 100644
index 3565add03a5e..000000000000
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_vpac270.c
- *
- * Driver for Voipac PXA270 PCMCIA and CF sockets
- *
- * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/gpio.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach-types.h>
-
-#include <mach/vpac270.h>
-
-#include "soc_common.h"
-
-static struct gpio vpac270_pcmcia_gpios[] = {
- { GPIO107_VPAC270_PCMCIA_PPEN, GPIOF_INIT_LOW, "PCMCIA PPEN" },
- { GPIO11_VPAC270_PCMCIA_RESET, GPIOF_INIT_LOW, "PCMCIA Reset" },
-};
-
-static struct gpio vpac270_cf_gpios[] = {
- { GPIO16_VPAC270_CF_RESET, GPIOF_INIT_LOW, "CF Reset" },
-};
-
-static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret;
-
- if (skt->nr == 0) {
- ret = gpio_request_array(vpac270_pcmcia_gpios,
- ARRAY_SIZE(vpac270_pcmcia_gpios));
-
- skt->stat[SOC_STAT_CD].gpio = GPIO84_VPAC270_PCMCIA_CD;
- skt->stat[SOC_STAT_CD].name = "PCMCIA CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO35_VPAC270_PCMCIA_RDY;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";
- } else {
- ret = gpio_request_array(vpac270_cf_gpios,
- ARRAY_SIZE(vpac270_cf_gpios));
-
- skt->stat[SOC_STAT_CD].gpio = GPIO17_VPAC270_CF_CD;
- skt->stat[SOC_STAT_CD].name = "CF CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO12_VPAC270_CF_RDY;
- skt->stat[SOC_STAT_RDY].name = "CF Ready";
- }
-
- return ret;
-}
-
-static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- if (skt->nr == 0)
- gpio_free_array(vpac270_pcmcia_gpios,
- ARRAY_SIZE(vpac270_pcmcia_gpios));
- else
- gpio_free_array(vpac270_cf_gpios,
- ARRAY_SIZE(vpac270_cf_gpios));
-}
-
-static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
-static int
-vpac270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- if (skt->nr == 0) {
- gpio_set_value(GPIO11_VPAC270_PCMCIA_RESET,
- (state->flags & SS_RESET));
- gpio_set_value(GPIO107_VPAC270_PCMCIA_PPEN,
- !(state->Vcc == 33 || state->Vcc == 50));
- } else {
- gpio_set_value(GPIO16_VPAC270_CF_RESET,
- (state->flags & SS_RESET));
- }
-
- return 0;
-}
-
-static struct pcmcia_low_level vpac270_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .first = 0,
- .nr = 2,
-
- .hw_init = vpac270_pcmcia_hw_init,
- .hw_shutdown = vpac270_pcmcia_hw_shutdown,
-
- .socket_state = vpac270_pcmcia_socket_state,
- .configure_socket = vpac270_pcmcia_configure_socket,
-};
-
-static struct platform_device *vpac270_pcmcia_device;
-
-static int __init vpac270_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_vpac270())
- return -ENODEV;
-
- vpac270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!vpac270_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(vpac270_pcmcia_device,
- &vpac270_pcmcia_ops, sizeof(vpac270_pcmcia_ops));
-
- if (!ret)
- ret = platform_device_add(vpac270_pcmcia_device);
-
- if (ret)
- platform_device_put(vpac270_pcmcia_device);
-
- return ret;
-}
-
-static void __exit vpac270_pcmcia_exit(void)
-{
- platform_device_unregister(vpac270_pcmcia_device);
-}
-
-module_init(vpac270_pcmcia_init);
-module_exit(vpac270_pcmcia_exit);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PCMCIA support for Voipac PXA270");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 29fdd174bc23..bce664bbdc98 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -17,7 +17,6 @@
#include <pcmcia/ss.h>
-#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index 7feb8d61c639..f1b5160cb8fa 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 3a8c84bb174d..61b0c8952bb5 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -46,8 +46,7 @@
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
-
-#include <mach/hardware.h>
+#include <linux/pci.h>
#include "soc_common.h"
@@ -784,8 +783,7 @@ void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
/* should not be required; violates some lowlevel drivers */
soc_common_pcmcia_config_skt(skt, &dead_socket);
- iounmap(skt->virt_io);
- skt->virt_io = NULL;
+ iounmap(PCI_IOBASE + skt->res_io_io.start);
release_resource(&skt->res_attr);
release_resource(&skt->res_mem);
release_resource(&skt->res_io);
@@ -818,11 +816,12 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
if (ret)
goto out_err_4;
- skt->virt_io = ioremap(skt->res_io.start, 0x10000);
- if (skt->virt_io == NULL) {
- ret = -ENOMEM;
+ skt->res_io_io = (struct resource)
+ DEFINE_RES_IO_NAMED(skt->nr * 0x1000 + 0x10000, 0x1000,
+ "PCMCIA I/O");
+ ret = pci_remap_iospace(&skt->res_io_io, skt->res_io.start);
+ if (ret)
goto out_err_5;
- }
/*
* We initialize default socket timing here, because
@@ -840,7 +839,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
skt->socket.resource_ops = &pccard_static_ops;
skt->socket.irq_mask = 0;
skt->socket.map_size = PAGE_SIZE;
- skt->socket.io_offset = (unsigned long)skt->virt_io;
+ skt->socket.io_offset = (unsigned long)skt->res_io_io.start;
skt->status = soc_common_pcmcia_skt_state(skt);
@@ -874,7 +873,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
out_err_7:
soc_pcmcia_hw_shutdown(skt);
out_err_6:
- iounmap(skt->virt_io);
+ iounmap(PCI_IOBASE + skt->res_io_io.start);
out_err_5:
release_resource(&skt->res_attr);
out_err_4:
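
The soc_common.c hunks above drop the per-socket ioremap() cookie in favour of a fixed slice of the kernel's I/O-port window. A minimal sketch of that pci_remap_iospace() pattern, with a hypothetical helper (name and error handling are illustrative, not part of the patch):

#include <linux/ioport.h>
#include <linux/pci.h>

/* Map one socket's PCMCIA bus I/O behind a fixed port-space offset. */
static int example_map_pcmcia_io(struct resource *res, unsigned int nr,
				 phys_addr_t bus_io_phys)
{
	*res = (struct resource)
		DEFINE_RES_IO_NAMED(0x10000 + nr * 0x1000, 0x1000,
				    "PCMCIA I/O");

	/* On success, inb()/outb() at res->start reach bus_io_phys. */
	return pci_remap_iospace(res, bus_io_phys);
}
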
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 222e81c79365..17ef05aa8afe 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -13,137 +13,19 @@
/* include the world */
#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
-
+#include <pcmcia/soc_common.h>
struct device;
struct gpio_desc;
struct pcmcia_low_level;
struct regulator;
-struct soc_pcmcia_regulator {
- struct regulator *reg;
- bool on;
-};
-
-/*
- * This structure encapsulates per-socket state which we might need to
- * use when responding to a Card Services query of some kind.
- */
-struct soc_pcmcia_socket {
- struct pcmcia_socket socket;
-
- /*
- * Info from low level handler
- */
- unsigned int nr;
- struct clk *clk;
-
- /*
- * Core PCMCIA state
- */
- const struct pcmcia_low_level *ops;
-
- unsigned int status;
- socket_state_t cs_state;
-
- unsigned short spd_io[MAX_IO_WIN];
- unsigned short spd_mem[MAX_WIN];
- unsigned short spd_attr[MAX_WIN];
-
- struct resource res_skt;
- struct resource res_io;
- struct resource res_mem;
- struct resource res_attr;
- void __iomem *virt_io;
-
- struct {
- int gpio;
- struct gpio_desc *desc;
- unsigned int irq;
- const char *name;
- } stat[6];
-#define SOC_STAT_CD 0 /* Card detect */
-#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
-#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
-#define SOC_STAT_RDY 3 /* Ready / Interrupt */
-#define SOC_STAT_VS1 4 /* Voltage sense 1 */
-#define SOC_STAT_VS2 5 /* Voltage sense 2 */
-
- struct gpio_desc *gpio_reset;
- struct gpio_desc *gpio_bus_enable;
- struct soc_pcmcia_regulator vcc;
- struct soc_pcmcia_regulator vpp;
-
- unsigned int irq_state;
-
-#ifdef CONFIG_CPU_FREQ
- struct notifier_block cpufreq_nb;
-#endif
- struct timer_list poll_timer;
- struct list_head node;
- void *driver_data;
-};
-
struct skt_dev_info {
int nskt;
struct soc_pcmcia_socket skt[];
};
-struct pcmcia_state {
- unsigned detect: 1,
- ready: 1,
- bvd1: 1,
- bvd2: 1,
- wrprot: 1,
- vs_3v: 1,
- vs_Xv: 1;
-};
-
-struct pcmcia_low_level {
- struct module *owner;
-
- /* first socket in system */
- int first;
- /* nr of sockets */
- int nr;
-
- int (*hw_init)(struct soc_pcmcia_socket *);
- void (*hw_shutdown)(struct soc_pcmcia_socket *);
-
- void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
- int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
-
- /*
- * Enable card status IRQs on (re-)initialisation. This can
- * be called at initialisation, power management event, or
- * pcmcia event.
- */
- void (*socket_init)(struct soc_pcmcia_socket *);
-
- /*
- * Disable card status IRQs and PCMCIA bus on suspend.
- */
- void (*socket_suspend)(struct soc_pcmcia_socket *);
-
- /*
- * Hardware specific timing routines.
- * If provided, the get_timing routine overrides the SOC default.
- */
- unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
- int (*set_timing)(struct soc_pcmcia_socket *);
- int (*show_timing)(struct soc_pcmcia_socket *, char *);
-
-#ifdef CONFIG_CPU_FREQ
- /*
- * CPUFREQ support.
- */
- int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
-#endif
-};
-
-
struct soc_pcmcia_timing {
unsigned short io;
unsigned short mem;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 82b63e60c5a2..300b0f2b5f84 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -64,6 +64,7 @@ config USB_LGM_PHY
config PHY_CAN_TRANSCEIVER
tristate "CAN transceiver PHY"
select GENERIC_PHY
+ select MULTIPLEXER
help
This option enables support for CAN transceivers as a PHY. This
driver provides function for putting the transceivers in various
diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
index f0bc87d654d4..3900f1650851 100644
--- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
+++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
@@ -24,6 +24,14 @@
#define SUN6I_DPHY_TX_CTL_REG 0x04
#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT BIT(28)
+#define SUN6I_DPHY_RX_CTL_REG 0x08
+#define SUN6I_DPHY_RX_CTL_EN_DBC BIT(31)
+#define SUN6I_DPHY_RX_CTL_RX_CLK_FORCE BIT(24)
+#define SUN6I_DPHY_RX_CTL_RX_D3_FORCE BIT(23)
+#define SUN6I_DPHY_RX_CTL_RX_D2_FORCE BIT(22)
+#define SUN6I_DPHY_RX_CTL_RX_D1_FORCE BIT(21)
+#define SUN6I_DPHY_RX_CTL_RX_D0_FORCE BIT(20)
+
#define SUN6I_DPHY_TX_TIME0_REG 0x10
#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24)
#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16)
@@ -44,12 +52,29 @@
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff)
+#define SUN6I_DPHY_RX_TIME0_REG 0x30
+#define SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(n) (((n) & 0xff) << 24)
+#define SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(n) (((n) & 0xff) << 16)
+#define SUN6I_DPHY_RX_TIME0_LP_RX(n) (((n) & 0xff) << 8)
+
+#define SUN6I_DPHY_RX_TIME1_REG 0x34
+#define SUN6I_DPHY_RX_TIME1_RX_DLY(n) (((n) & 0xfff) << 20)
+#define SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(n) ((n) & 0xfffff)
+
+#define SUN6I_DPHY_RX_TIME2_REG 0x38
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA1(n) (((n) & 0xff) << 8)
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(n) ((n) & 0xff)
+
+#define SUN6I_DPHY_RX_TIME3_REG 0x40
+#define SUN6I_DPHY_RX_TIME3_LPRST_DLY(n) (((n) & 0xffff) << 16)
+
#define SUN6I_DPHY_ANA0_REG 0x4c
#define SUN6I_DPHY_ANA0_REG_PWS BIT(31)
#define SUN6I_DPHY_ANA0_REG_DMPC BIT(28)
#define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24)
#define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12)
#define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8)
+#define SUN6I_DPHY_ANA0_REG_SFB(n) (((n) & 3) << 2)
#define SUN6I_DPHY_ANA1_REG 0x50
#define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31)
@@ -84,6 +109,11 @@
#define SUN6I_DPHY_DBG5_REG 0xf4
+enum sun6i_dphy_direction {
+ SUN6I_DPHY_DIRECTION_TX,
+ SUN6I_DPHY_DIRECTION_RX,
+};
+
struct sun6i_dphy {
struct clk *bus_clk;
struct clk *mod_clk;
@@ -92,6 +122,8 @@ struct sun6i_dphy {
struct phy *phy;
struct phy_configure_opts_mipi_dphy config;
+
+ enum sun6i_dphy_direction direction;
};
static int sun6i_dphy_init(struct phy *phy)
@@ -119,9 +151,8 @@ static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
return 0;
}
-static int sun6i_dphy_power_on(struct phy *phy)
+static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy)
{
- struct sun6i_dphy *dphy = phy_get_drvdata(phy);
u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
@@ -211,12 +242,129 @@ static int sun6i_dphy_power_on(struct phy *phy)
return 0;
}
+static int sun6i_dphy_rx_power_on(struct sun6i_dphy *dphy)
+{
+ /* Physical clock rate is actually half of symbol rate with DDR. */
+ unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate;
+ unsigned long dphy_clk_rate;
+ unsigned int rx_dly;
+ unsigned int lprst_dly;
+ u32 value;
+
+ dphy_clk_rate = clk_get_rate(dphy->mod_clk);
+ if (!dphy_clk_rate)
+ return -EINVAL;
+
+ /* Hardcoded timing parameters from the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME0_REG,
+ SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(255) |
+ SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(255) |
+ SUN6I_DPHY_RX_TIME0_LP_RX(255));
+
+ /*
+ * Formula from the Allwinner BSP, with hardcoded coefficients
+ * (probably internal divider/multiplier).
+ */
+ rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
+
+ /*
+ * The Allwinner BSP has an alternative formula for LP_RX_ULPS_WP:
+ * lp_ulps_wp_cnt = lp_ulps_wp_ms * lp_clk / 1000
+ * but does not use it and hardcodes 255 instead.
+ */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME1_REG,
+ SUN6I_DPHY_RX_TIME1_RX_DLY(rx_dly) |
+ SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(255));
+
+ /* HS_RX_ANA0 value is hardcoded in the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME2_REG,
+ SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(4));
+
+ /*
+ * Formula from the Allwinner BSP, with hardcoded coefficients
+ * (probably internal divider/multiplier).
+ */
+ lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME3_REG,
+ SUN6I_DPHY_RX_TIME3_LPRST_DLY(lprst_dly));
+
+ /* Analog parameters are hardcoded in the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
+ SUN6I_DPHY_ANA0_REG_PWS |
+ SUN6I_DPHY_ANA0_REG_SLV(7) |
+ SUN6I_DPHY_ANA0_REG_SFB(2));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
+ SUN6I_DPHY_ANA1_REG_SVTT(4));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
+ SUN6I_DPHY_ANA4_REG_DMPLVC |
+ SUN6I_DPHY_ANA4_REG_DMPLVD(1));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
+ SUN6I_DPHY_ANA2_REG_ENIB);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
+ SUN6I_DPHY_ANA3_EN_LDOR |
+ SUN6I_DPHY_ANA3_EN_LDOC |
+ SUN6I_DPHY_ANA3_EN_LDOD);
+
+ /*
+ * Delay comes from the Allwinner BSP, likely for internal regulator
+ * ramp-up.
+ */
+ udelay(3);
+
+ value = SUN6I_DPHY_RX_CTL_EN_DBC | SUN6I_DPHY_RX_CTL_RX_CLK_FORCE;
+
+ /*
+ * Rx data lane force-enable bits are used as regular RX enable by the
+ * Allwinner BSP.
+ */
+ if (dphy->config.lanes >= 1)
+ value |= SUN6I_DPHY_RX_CTL_RX_D0_FORCE;
+ if (dphy->config.lanes >= 2)
+ value |= SUN6I_DPHY_RX_CTL_RX_D1_FORCE;
+ if (dphy->config.lanes >= 3)
+ value |= SUN6I_DPHY_RX_CTL_RX_D2_FORCE;
+ if (dphy->config.lanes == 4)
+ value |= SUN6I_DPHY_RX_CTL_RX_D3_FORCE;
+
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_CTL_REG, value);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
+ SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
+ SUN6I_DPHY_GCTL_EN);
+
+ return 0;
+}
+
+static int sun6i_dphy_power_on(struct phy *phy)
+{
+ struct sun6i_dphy *dphy = phy_get_drvdata(phy);
+
+ switch (dphy->direction) {
+ case SUN6I_DPHY_DIRECTION_TX:
+ return sun6i_dphy_tx_power_on(dphy);
+ case SUN6I_DPHY_DIRECTION_RX:
+ return sun6i_dphy_rx_power_on(dphy);
+ default:
+ return -EINVAL;
+ }
+}
+
static int sun6i_dphy_power_off(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
- regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
- SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, 0);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, 0);
return 0;
}
@@ -253,7 +401,9 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct sun6i_dphy *dphy;
+ const char *direction;
void __iomem *regs;
+ int ret;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
@@ -290,6 +440,14 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
return PTR_ERR(dphy->phy);
}
+ dphy->direction = SUN6I_DPHY_DIRECTION_TX;
+
+ ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction",
+ &direction);
+
+ if (!ret && !strncmp(direction, "rx", 2))
+ dphy->direction = SUN6I_DPHY_DIRECTION_RX;
+
phy_set_drvdata(dphy->phy, dphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
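
The rx_dly and lprst_dly formulas above use plain integer division, so the programmed delays step in units of 8 and 4 respectively. A standalone check of that arithmetic, with purely illustrative clock rates (150 MHz mod_clk, 250 Mbit/s per-lane symbol rate):

#include <stdio.h>

int main(void)
{
	unsigned long dphy_clk_rate = 150000000UL;    /* assumed mod_clk rate */
	unsigned long mipi_symbol_rate = 250000000UL; /* assumed hs_clk_rate */
	unsigned int rx_dly, lprst_dly;

	/* Same integer arithmetic as sun6i_dphy_rx_power_on() above. */
	rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
	lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));

	/* Prints rx_dly=32 lprst_dly=4 for these rates. */
	printf("rx_dly=%u lprst_dly=%u\n", rx_dly, lprst_dly);
	return 0;
}
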
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 6b917f7bddbe..73fb99ccd525 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -83,6 +83,7 @@
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
@@ -1684,6 +1685,66 @@ static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
{0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
@@ -1765,6 +1826,69 @@ static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -1840,6 +1964,69 @@ static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -2299,9 +2486,9 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
},
[TYPE_QSGMII] = {
- [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
- [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
- [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
},
},
[TYPE_USB] = {
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index a95572b397ca..e625b32889bf 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -4,17 +4,33 @@
* Copyright 2019 Purism SPC
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+/* Control and Status Registers (CSR) */
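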
+#define PHY_CTRL 0x00
+#define CCM_MASK GENMASK(7, 5)
+#define CCM(n) FIELD_PREP(CCM_MASK, (n))
+#define CCM_1_2V 0x5
+#define CA_MASK GENMASK(4, 2)
+#define CA_3_51MA 0x4
+#define CA(n) FIELD_PREP(CA_MASK, (n))
+#define RFB BIT(1)
+#define LVDS_EN BIT(0)
/* DPHY registers */
#define DPHY_PD_DPHY 0x00
@@ -55,8 +71,15 @@
#define PWR_ON 0
#define PWR_OFF 1
+#define MIN_VCO_FREQ 640000000
+#define MAX_VCO_FREQ 1500000000
+
+#define MIN_LVDS_REFCLK_FREQ 24000000
+#define MAX_LVDS_REFCLK_FREQ 150000000
+
enum mixel_dphy_devtype {
MIXEL_IMX8MQ,
+ MIXEL_IMX8QXP,
};
struct mixel_dphy_devdata {
@@ -65,6 +88,7 @@ struct mixel_dphy_devdata {
u8 reg_rxlprp;
u8 reg_rxcdrp;
u8 reg_rxhs_settle;
+ bool is_combo; /* MIPI DPHY and LVDS PHY combo */
};
static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
@@ -74,6 +98,10 @@ static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
.reg_rxlprp = 0x40,
.reg_rxcdrp = 0x44,
.reg_rxhs_settle = 0x48,
+ .is_combo = false,
+ },
+ [MIXEL_IMX8QXP] = {
+ .is_combo = true,
},
};
@@ -95,8 +123,12 @@ struct mixel_dphy_cfg {
struct mixel_dphy_priv {
struct mixel_dphy_cfg cfg;
struct regmap *regmap;
+ struct regmap *lvds_regmap;
struct clk *phy_ref_clk;
const struct mixel_dphy_devdata *devdata;
+ struct imx_sc_ipc *ipc_handle;
+ bool is_slave;
+ int id;
};
static const struct regmap_config mixel_dphy_regmap_config = {
@@ -317,7 +349,8 @@ static int mixel_dphy_set_pll_params(struct phy *phy)
return 0;
}
-static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+static int
+mixel_dphy_configure_mipi_dphy(struct phy *phy, union phy_configure_opts *opts)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
struct mixel_dphy_cfg cfg = { 0 };
@@ -345,15 +378,126 @@ static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
return 0;
}
+static int
+mixel_dphy_configure_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ struct phy_configure_opts_lvds *lvds_opts = &opts->lvds;
+ unsigned long data_rate;
+ unsigned long fvco;
+ u32 rsc;
+ u32 co;
+ int ret;
+
+ priv->is_slave = lvds_opts->is_slave;
+
+ /* LVDS interface pins */
+ regmap_write(priv->lvds_regmap, PHY_CTRL,
+ CCM(CCM_1_2V) | CA(CA_3_51MA) | RFB);
+
+ /* enable MODE8 only for slave LVDS PHY */
+ rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+ ret = imx_sc_misc_set_control(priv->ipc_handle, rsc, IMX_SC_C_DUAL_MODE,
+ lvds_opts->is_slave);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to configure MODE8: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Choose an appropriate divider ratio to meet the requirement of
+ * PLL VCO frequency range.
+ *
+ * ----- 640MHz ~ 1500MHz ------------ ---------------
+ * | VCO | ----------------> | CO divider | -> | LVDS data rate|
+ * ----- FVCO ------------ ---------------
+ * 1/2/4/8 div 7 * differential_clk_rate
+ */
+ data_rate = 7 * lvds_opts->differential_clk_rate;
+ for (co = 1; co <= 8; co *= 2) {
+ fvco = data_rate * co;
+
+ if (fvco >= MIN_VCO_FREQ)
+ break;
+ }
+
+ if (fvco < MIN_VCO_FREQ || fvco > MAX_VCO_FREQ) {
+ dev_err(&phy->dev, "VCO frequency %lu is out of range\n", fvco);
+ return -ERANGE;
+ }
+
+ /*
+ * CO is configurable, while CN and CM are not,
+ * as fixed ratios 1 and 7 are applied respectively.
+ */
+ phy_write(phy, __ffs(co), DPHY_CO);
+
+ /* set reference clock rate */
+ clk_set_rate(priv->phy_ref_clk, lvds_opts->differential_clk_rate);
+
+ return ret;
+}
+
+static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ if (!opts) {
+ dev_err(&phy->dev, "No configuration options\n");
+ return -EINVAL;
+ }
+
+ if (phy->attrs.mode == PHY_MODE_MIPI_DPHY)
+ return mixel_dphy_configure_mipi_dphy(phy, opts);
+ else if (phy->attrs.mode == PHY_MODE_LVDS)
+ return mixel_dphy_configure_lvds_phy(phy, opts);
+
+ dev_err(&phy->dev,
+ "Failed to configure PHY with invalid PHY mode: %d\n", phy->attrs.mode);
+
+ return -EINVAL;
+}
+
+static int
+mixel_dphy_validate_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct phy_configure_opts_lvds *lvds_cfg = &opts->lvds;
+
+ if (lvds_cfg->bits_per_lane_and_dclk_cycle != 7) {
+ dev_err(&phy->dev, "Invalid bits per LVDS data lane: %u\n",
+ lvds_cfg->bits_per_lane_and_dclk_cycle);
+ return -EINVAL;
+ }
+
+ if (lvds_cfg->lanes != 4) {
+ dev_err(&phy->dev, "Invalid LVDS data lanes: %u\n", lvds_cfg->lanes);
+ return -EINVAL;
+ }
+
+ if (lvds_cfg->differential_clk_rate < MIN_LVDS_REFCLK_FREQ ||
+ lvds_cfg->differential_clk_rate > MAX_LVDS_REFCLK_FREQ) {
+ dev_err(&phy->dev,
+ "Invalid LVDS differential clock rate: %lu\n",
+ lvds_cfg->differential_clk_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mixel_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
{
- struct mixel_dphy_cfg cfg = { 0 };
+ if (mode == PHY_MODE_MIPI_DPHY) {
+ struct mixel_dphy_cfg mipi_dphy_cfg = { 0 };
- if (mode != PHY_MODE_MIPI_DPHY)
- return -EINVAL;
+ return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy,
+ &mipi_dphy_cfg);
+ } else if (mode == PHY_MODE_LVDS) {
+ return mixel_dphy_validate_lvds_phy(phy, opts);
+ }
- return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ dev_err(&phy->dev,
+ "Failed to validate PHY with invalid PHY mode: %d\n", mode);
+ return -EINVAL;
}
static int mixel_dphy_init(struct phy *phy)
@@ -373,27 +517,75 @@ static int mixel_dphy_exit(struct phy *phy)
return 0;
}
-static int mixel_dphy_power_on(struct phy *phy)
+static int mixel_dphy_power_on_mipi_dphy(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
u32 locked;
int ret;
- ret = clk_prepare_enable(priv->phy_ref_clk);
- if (ret < 0)
- return ret;
-
phy_write(phy, PWR_ON, DPHY_PD_PLL);
ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
locked, PLL_LOCK_SLEEP,
PLL_LOCK_TIMEOUT);
if (ret < 0) {
dev_err(&phy->dev, "Could not get DPHY lock (%d)!\n", ret);
- goto clock_disable;
+ return ret;
}
phy_write(phy, PWR_ON, DPHY_PD_DPHY);
return 0;
+}
+
+static int mixel_dphy_power_on_lvds_phy(struct phy *phy)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ u32 locked;
+ int ret;
+
+ regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, LVDS_EN);
+
+ phy_write(phy, PWR_ON, DPHY_PD_DPHY);
+ phy_write(phy, PWR_ON, DPHY_PD_PLL);
+
+ /* do not wait for slave LVDS PHY being locked */
+ if (priv->is_slave)
+ return 0;
+
+ ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
+ locked, PLL_LOCK_SLEEP,
+ PLL_LOCK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&phy->dev, "Could not get LVDS PHY lock (%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mixel_dphy_power_on(struct phy *phy)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->phy_ref_clk);
+ if (ret < 0)
+ return ret;
+
+ if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) {
+ ret = mixel_dphy_power_on_mipi_dphy(phy);
+ } else if (phy->attrs.mode == PHY_MODE_LVDS) {
+ ret = mixel_dphy_power_on_lvds_phy(phy);
+ } else {
+ dev_err(&phy->dev,
+ "Failed to power on PHY with invalid PHY mode: %d\n",
+ phy->attrs.mode);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto clock_disable;
+
+ return 0;
clock_disable:
clk_disable_unprepare(priv->phy_ref_clk);
return ret;
@@ -406,16 +598,51 @@ static int mixel_dphy_power_off(struct phy *phy)
phy_write(phy, PWR_OFF, DPHY_PD_PLL);
phy_write(phy, PWR_OFF, DPHY_PD_DPHY);
+ if (phy->attrs.mode == PHY_MODE_LVDS)
+ regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, 0);
+
clk_disable_unprepare(priv->phy_ref_clk);
return 0;
}
+static int mixel_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (priv->devdata->is_combo && mode != PHY_MODE_LVDS) {
+ dev_err(&phy->dev, "Failed to set PHY mode for combo PHY\n");
+ return -EINVAL;
+ }
+
+ if (!priv->devdata->is_combo && mode != PHY_MODE_MIPI_DPHY) {
+ dev_err(&phy->dev, "Failed to set PHY mode to MIPI DPHY\n");
+ return -EINVAL;
+ }
+
+ if (priv->devdata->is_combo) {
+ u32 rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+
+ ret = imx_sc_misc_set_control(priv->ipc_handle,
+ rsc, IMX_SC_C_MODE,
+ mode == PHY_MODE_LVDS);
+ if (ret) {
+ dev_err(&phy->dev,
+ "Failed to set PHY mode via SCU ipc: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static const struct phy_ops mixel_dphy_phy_ops = {
.init = mixel_dphy_init,
.exit = mixel_dphy_exit,
.power_on = mixel_dphy_power_on,
.power_off = mixel_dphy_power_off,
+ .set_mode = mixel_dphy_set_mode,
.configure = mixel_dphy_configure,
.validate = mixel_dphy_validate,
.owner = THIS_MODULE,
@@ -424,6 +651,8 @@ static const struct phy_ops mixel_dphy_phy_ops = {
static const struct of_device_id mixel_dphy_of_match[] = {
{ .compatible = "fsl,imx8mq-mipi-dphy",
.data = &mixel_dphy_devdata[MIXEL_IMX8MQ] },
+ { .compatible = "fsl,imx8qxp-mipi-dphy",
+ .data = &mixel_dphy_devdata[MIXEL_IMX8QXP] },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mixel_dphy_of_match);
@@ -436,6 +665,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
struct mixel_dphy_priv *priv;
struct phy *phy;
void __iomem *base;
+ int ret;
if (!np)
return -ENODEV;
@@ -467,6 +697,30 @@ static int mixel_dphy_probe(struct platform_device *pdev)
dev_dbg(dev, "phy_ref clock rate: %lu\n",
clk_get_rate(priv->phy_ref_clk));
+ if (priv->devdata->is_combo) {
+ priv->lvds_regmap =
+ syscon_regmap_lookup_by_phandle(np, "fsl,syscon");
+ if (IS_ERR(priv->lvds_regmap)) {
+ ret = PTR_ERR(priv->lvds_regmap);
+ dev_err_probe(dev, ret, "Failed to get LVDS regmap\n");
+ return ret;
+ }
+
+ priv->id = of_alias_get_id(np, "mipi_dphy");
+ if (priv->id < 0) {
+ dev_err(dev, "Failed to get phy node alias id: %d\n",
+ priv->id);
+ return priv->id;
+ }
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "Failed to get SCU ipc handle\n");
+ return ret;
+ }
+ }
+
dev_set_drvdata(dev, priv);
phy = devm_phy_create(dev, np, &mixel_dphy_phy_ops);
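
The CO divider search above walks the 1/2/4/8 post-dividers until the VCO lands inside its 640 MHz - 1.5 GHz window, and the register then takes log2 of the chosen divider (the __ffs(co) write). A standalone check using an illustrative 74.25 MHz LVDS differential clock:

#include <stdio.h>

int main(void)
{
	unsigned long differential_clk_rate = 74250000UL; /* assumed LVDS clock */
	unsigned long data_rate = 7 * differential_clk_rate;
	unsigned long fvco = 0;
	unsigned int co;

	/* Same search as mixel_dphy_configure_lvds_phy() above. */
	for (co = 1; co <= 8; co *= 2) {
		fvco = data_rate * co;
		if (fvco >= 640000000UL)	/* MIN_VCO_FREQ */
			break;
	}

	/* Prints co=2 fvco=1039500000; DPHY_CO is then written with log2(2) = 1. */
	printf("co=%u fvco=%lu\n", co, fvco);
	return 0;
}
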
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index f1eb03ba25d6..ad7d2edfc414 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -94,15 +94,21 @@ static int imx8_pcie_phy_init(struct phy *phy)
IMX8MM_GPR_PCIE_CMN_RST);
usleep_range(200, 500);
- if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
+ pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
/* Configure the pad as input */
val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
- } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+ } else {
/* Configure the PHY to output the refclock via pad */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ }
+
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT ||
+ pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
+ /* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 5fb4217fb8e0..d4bd419abc3c 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -120,20 +120,16 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
return PTR_ERR(hdmi_phy->regs);
ref_clk = devm_clk_get(dev, "pll_ref");
- if (IS_ERR(ref_clk)) {
- ret = PTR_ERR(ref_clk);
- dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(dev, PTR_ERR(ref_clk),
+ "Failed to get PLL reference clock\n");
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
- if (ret < 0) {
- dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
hdmi_phy->dev = dev;
hdmi_phy->conf =
@@ -141,25 +137,19 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
- if (IS_ERR(hdmi_phy->pll)) {
- ret = PTR_ERR(hdmi_phy->pll);
- dev_err(dev, "Failed to register PLL: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hdmi_phy->pll))
+ return dev_err_probe(dev, PTR_ERR(hdmi_phy->pll),
+ "Failed to register PLL\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
&hdmi_phy->ibias);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get ibias\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
&hdmi_phy->ibias_up);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get ibias_up\n");
dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
hdmi_phy->drv_imp_clk = 0x30;
@@ -168,17 +158,15 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
hdmi_phy->drv_imp_d0 = 0x30;
phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
- if (IS_ERR(phy)) {
- dev_err(dev, "Failed to create HDMI PHY\n");
- return PTR_ERR(phy);
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy), "Cannot create HDMI PHY\n");
+
phy_set_drvdata(phy, hdmi_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- dev_err(dev, "Failed to register HDMI PHY\n");
- return PTR_ERR(phy_provider);
- }
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(dev, PTR_ERR(phy_provider),
+ "Failed to register HDMI PHY\n");
if (hdmi_phy->conf->pll_default_off)
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 67b005d5b9e3..28506932bd91 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -154,11 +154,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return PTR_ERR(mipi_tx->regs);
ref_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ref_clk)) {
- ret = PTR_ERR(ref_clk);
- dev_err(dev, "Failed to get reference clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(dev, PTR_ERR(ref_clk),
+ "Failed to get reference clock\n");
ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
&mipi_tx->mipitx_drive);
@@ -178,27 +176,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
- if (ret < 0) {
- dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
- if (IS_ERR(mipi_tx->pll)) {
- ret = PTR_ERR(mipi_tx->pll);
- dev_err(dev, "Failed to register PLL: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(mipi_tx->pll))
+ return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n");
phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy), "Failed to create MIPI D-PHY\n");
+
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
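
Both MediaTek conversions above rely on dev_err_probe() logging the error (or silently recording -EPROBE_DEFER as the deferral reason) and handing the code straight back, so the get/check/log/return sequence collapses into one statement. A hypothetical probe helper showing the shape, not taken from either driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_refclk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "pll_ref");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Failed to get PLL reference clock\n");

	return 0;
}
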
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 6f3fe37dee0e..95c6dbb52da7 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -10,6 +10,7 @@
#include<linux/module.h>
#include<linux/gpio.h>
#include<linux/gpio/consumer.h>
+#include <linux/mux/consumer.h>
struct can_transceiver_data {
u32 flags;
@@ -21,13 +22,22 @@ struct can_transceiver_phy {
struct phy *generic_phy;
struct gpio_desc *standby_gpio;
struct gpio_desc *enable_gpio;
+ struct mux_state *mux_state;
};
/* Power on function */
static int can_transceiver_phy_power_on(struct phy *phy)
{
struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
-
+ int ret;
+
+ if (can_transceiver_phy->mux_state) {
+ ret = mux_state_select(can_transceiver_phy->mux_state);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to select CAN mux: %d\n", ret);
+ return ret;
+ }
+ }
if (can_transceiver_phy->standby_gpio)
gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0);
if (can_transceiver_phy->enable_gpio)
@@ -45,6 +55,8 @@ static int can_transceiver_phy_power_off(struct phy *phy)
gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1);
if (can_transceiver_phy->enable_gpio)
gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0);
+ if (can_transceiver_phy->mux_state)
+ mux_state_deselect(can_transceiver_phy->mux_state);
return 0;
}
@@ -95,6 +107,16 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
drvdata = match->data;
+ if (of_property_read_bool(dev->of_node, "mux-states")) {
+ struct mux_state *mux_state;
+
+ mux_state = devm_mux_state_get(dev, NULL);
+ if (IS_ERR(mux_state))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mux_state),
+ "failed to get mux\n");
+ can_transceiver_phy->mux_state = mux_state;
+ }
+
phy = devm_phy_create(dev, dev->of_node,
&can_transceiver_phy_ops);
if (IS_ERR(phy)) {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 91e28d6ce450..d93ddf1262c5 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -229,6 +229,17 @@ void phy_pm_runtime_forbid(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
+/**
+ * phy_init - phy internal initialization before phy operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to allow the phy's driver to perform phy internal initialization,
+ * such as PLL block powering, clock initialization or anything else the
+ * phy requires before it starts operating.
+ * Must be called before phy_power_on().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_init(struct phy *phy)
{
int ret;
@@ -242,6 +253,9 @@ int phy_init(struct phy *phy)
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex);
+ if (phy->power_count > phy->init_count)
+ dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");
+
if (phy->init_count == 0 && phy->ops->init) {
ret = phy->ops->init(phy);
if (ret < 0) {
@@ -258,6 +272,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_init);
+/**
+ * phy_exit - Phy internal un-initialization
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_power_off().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_exit(struct phy *phy)
{
int ret;
@@ -287,6 +309,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_exit);
+/**
+ * phy_power_on - Enable the phy and enter proper operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_init().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_power_on(struct phy *phy)
{
int ret = 0;
@@ -329,6 +359,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_power_on);
+/**
+ * phy_power_off - Disable the phy.
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called before phy_exit().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_power_off(struct phy *phy)
{
int ret;
@@ -432,7 +470,7 @@ EXPORT_SYMBOL_GPL(phy_reset);
* runtime, which are otherwise lost after host controller reset and cannot
* be applied in phy_init() or phy_power_on().
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_calibrate(struct phy *phy)
{
@@ -458,7 +496,7 @@ EXPORT_SYMBOL_GPL(phy_calibrate);
* on the phy. The configuration will be applied on the current phy
* mode, that can be changed using phy_set_mode().
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
@@ -492,7 +530,7 @@ EXPORT_SYMBOL_GPL(phy_configure);
* PHY, so calling it as many times as deemed fit will have no side
* effect.
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
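
The kernel-doc added above pins down the consumer-side ordering that the new phy_init() warning checks for: init before power-on, power-off before exit. A hypothetical consumer sketch of that sequence, with error handling trimmed to the essentials:

#include <linux/phy/phy.h>

static int example_phy_bringup(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);		/* internal init: PLLs, clocks */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* only valid after phy_init() */
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	return 0;
}

/* Teardown mirrors it: phy_power_off(phy); then phy_exit(phy); */
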
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index b144ae1f729a..c7309e981bfb 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2535,6 +2535,50 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
@@ -3177,7 +3221,7 @@ struct qmp_phy_combo_cfg {
* @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
- * @pipe_clk: pipe lock
+ * @pipe_clk: pipe clock
* @index: lane index
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
@@ -4217,6 +4261,35 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pwrdn_delay_max = 1005, /* us */
};
+static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx65_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx65_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_sdx55_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8350_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.type = PHY_TYPE_UFS,
.nlanes = 2,
@@ -5012,7 +5085,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_reg_enable;
+ goto err_unlock;
}
for (i = 0; i < cfg->num_resets; i++) {
@@ -5020,7 +5093,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
if (ret) {
dev_err(qmp->dev, "%s reset assert failed\n",
cfg->reset_list[i]);
- goto err_rst_assert;
+ goto err_disable_regulators;
}
}
@@ -5029,13 +5102,13 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
if (ret) {
dev_err(qmp->dev, "%s reset deassert failed\n",
qphy->cfg->reset_list[i]);
- goto err_rst;
+ goto err_assert_reset;
}
}
ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
if (ret)
- goto err_rst;
+ goto err_assert_reset;
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
@@ -5077,12 +5150,12 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
return 0;
-err_rst:
+err_assert_reset:
while (++i < cfg->num_resets)
reset_control_assert(qmp->resets[i]);
-err_rst_assert:
+err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_reg_enable:
+err_unlock:
mutex_unlock(&qmp->phy_mutex);
return ret;
@@ -5188,14 +5261,14 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
if (ret) {
dev_err(qmp->dev, "lane%d reset deassert failed\n",
qphy->index);
- goto err_lane_rst;
+ return ret;
}
}
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
- goto err_clk_enable;
+ goto err_reset_lane;
}
/* Tx, Rx, and PCS configurations */
@@ -5246,7 +5319,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
- goto err_lane_rst;
+ goto err_disable_pipe_clk;
qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
cfg->pcs_misc_tbl_num);
@@ -5285,17 +5358,17 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
PHY_INIT_COMPLETE_TIMEOUT);
if (ret) {
dev_err(qmp->dev, "phy initialization timed-out\n");
- goto err_pcs_ready;
+ goto err_disable_pipe_clk;
}
}
return 0;
-err_pcs_ready:
+err_disable_pipe_clk:
clk_disable_unprepare(qphy->pipe_clk);
-err_clk_enable:
+err_reset_lane:
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
-err_lane_rst:
+
return ret;
}
@@ -5514,7 +5587,7 @@ static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg
struct reset_control *rst;
const char *name = cfg->reset_list[i];
- rst = devm_reset_control_get(dev, name);
+ rst = devm_reset_control_get_exclusive(dev, name);
if (IS_ERR(rst)) {
dev_err(dev, "failed to get %s reset\n", name);
return PTR_ERR(rst);
@@ -5818,6 +5891,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
.owner = THIS_MODULE,
};
+static void qcom_qmp_reset_control_put(void *data)
+{
+ reset_control_put(data);
+}
+
static
int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
@@ -5890,7 +5968,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
* all phys that don't need this.
*/
snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
if (IS_ERR(qphy->pipe_clk)) {
if (cfg->type == PHY_TYPE_PCIE ||
cfg->type == PHY_TYPE_USB3) {
@@ -5907,11 +5985,15 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
/* Get lane reset, if any */
if (cfg->has_lane_rst) {
snprintf(prop_name, sizeof(prop_name), "lane%d", id);
- qphy->lane_rst = of_reset_control_get(np, prop_name);
+ qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
if (IS_ERR(qphy->lane_rst)) {
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
}
+ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+ qphy->lane_rst);
+ if (ret)
+ return ret;
}
if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
@@ -6008,6 +6090,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm6115-qmp-ufs-phy",
.data = &sm6115_ufsphy_cfg,
}, {
+ .compatible = "qcom,sm6350-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
.compatible = "qcom,sm8150-qmp-ufs-phy",
.data = &sm8150_ufsphy_cfg,
}, {
@@ -6047,6 +6132,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sdx55-qmp-usb3-uni-phy",
.data = &sdx55_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
+ .data = &sdx65_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sm8350-qmp-usb3-phy",
.data = &sm8350_usb3phy_cfg,
}, {
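
The lane-reset change above switches to of_reset_control_get_exclusive() and registers a devm action so the reference is dropped automatically on unbind; devm_add_action_or_reset() also runs the action immediately if it cannot be registered. A hypothetical sketch of that pairing, not lifted from the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/reset.h>

static void example_reset_put(void *data)
{
	reset_control_put(data);
}

static int example_get_lane_reset(struct device *dev, struct device_node *np,
				  struct reset_control **rst)
{
	*rst = of_reset_control_get_exclusive(np, "lane0");
	if (IS_ERR(*rst))
		return PTR_ERR(*rst);

	/* On failure this calls example_reset_put(*rst) before returning. */
	return devm_add_action_or_reset(dev, example_reset_put, *rst);
}
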
diff --git a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
index 4df9476ef2a9..639452f47869 100644
--- a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
+++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
@@ -327,7 +327,6 @@ static int rk_dphy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct rk_dphy_drv_data *drv_data;
struct phy_provider *phy_provider;
- const struct of_device_id *of_id;
struct rk_dphy *priv;
struct phy *phy;
unsigned int i;
@@ -347,11 +346,7 @@ static int rk_dphy_probe(struct platform_device *pdev)
return -ENODEV;
}
- of_id = of_match_device(rk_dphy_dt_ids, dev);
- if (!of_id)
- return -EINVAL;
-
- drv_data = of_id->data;
+ drv_data = of_device_get_match_data(dev);
priv->drv_data = drv_data;
priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks,
sizeof(*priv->clks), GFP_KERNEL);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eca77e44a4c1..6711659f727c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -116,11 +116,15 @@ struct rockchip_chg_det_reg {
* @bvalid_det_en: vbus valid rise detection enable register.
* @bvalid_det_st: vbus valid rise detection status register.
* @bvalid_det_clr: vbus valid rise detection clear register.
+ * @id_det_en: id detection enable register.
+ * @id_det_st: id detection state register.
+ * @id_det_clr: id detection clear register.
* @ls_det_en: linestate detection enable register.
* @ls_det_st: linestate detection state register.
* @ls_det_clr: linestate detection clear register.
* @utmi_avalid: utmi vbus avalid status register.
* @utmi_bvalid: utmi vbus bvalid status register.
+ * @utmi_id: utmi id state register.
* @utmi_ls: utmi linestate state register.
* @utmi_hstdet: utmi host disconnect register.
*/
@@ -129,11 +133,15 @@ struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg bvalid_det_en;
struct usb2phy_reg bvalid_det_st;
struct usb2phy_reg bvalid_det_clr;
+ struct usb2phy_reg id_det_en;
+ struct usb2phy_reg id_det_st;
+ struct usb2phy_reg id_det_clr;
struct usb2phy_reg ls_det_en;
struct usb2phy_reg ls_det_st;
struct usb2phy_reg ls_det_clr;
struct usb2phy_reg utmi_avalid;
struct usb2phy_reg utmi_bvalid;
+ struct usb2phy_reg utmi_id;
struct usb2phy_reg utmi_ls;
struct usb2phy_reg utmi_hstdet;
};
@@ -161,6 +169,7 @@ struct rockchip_usb2phy_cfg {
* @suspended: phy suspended flag.
* @vbus_attached: otg device vbus status.
* @bvalid_irq: IRQ number assigned for vbus valid rise detection.
+ * @id_irq: IRQ number assigned for ID pin detection.
* @ls_irq: IRQ number assigned for linestate detection.
* @otg_mux_irq: IRQ number which multiplex otg-id/otg-bvalid/linestate
* irqs to one irq in otg-port.
@@ -179,6 +188,7 @@ struct rockchip_usb2phy_port {
bool suspended;
bool vbus_attached;
int bvalid_irq;
+ int id_irq;
int ls_irq;
int otg_mux_irq;
struct mutex mutex;
@@ -253,7 +263,7 @@ static inline bool property_enabled(struct regmap *base,
return false;
tmp = (orig & mask) >> reg->bitstart;
- return tmp == reg->enable;
+ return tmp != reg->disable;
}
static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
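
The property_enabled() change above matters because several detect fields widen from one bit to two in the tables below (for example bvalid_det_st moving to bits 3..2): with the old test, a status word with only one of the two bits set compared unequal to the enable value and read as "off". A standalone illustration using the rk3328 field layout, values copied from the table below:

#include <stdio.h>

struct usb2phy_reg { unsigned offset, bitend, bitstart, disable, enable; };

static unsigned field(unsigned orig, const struct usb2phy_reg *r)
{
	unsigned mask = ((1u << (r->bitend - r->bitstart + 1)) - 1) << r->bitstart;

	return (orig & mask) >> r->bitstart;
}

int main(void)
{
	/* bvalid_det_st widened to bits 3..2, disable 0, enable 3. */
	struct usb2phy_reg st = { 0x0114, 3, 2, 0, 3 };
	unsigned orig = 1u << 2;	/* only one of the two status bits set */

	printf("old=%d new=%d\n",
	       field(orig, &st) == st.enable,	/* 0: event missed */
	       field(orig, &st) != st.disable);	/* 1: event detected */
	return 0;
}
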
@@ -419,6 +429,19 @@ static int rockchip_usb2phy_init(struct phy *phy)
if (ret)
goto out;
+ /* clear id status and enable id detect irq */
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->id_det_clr,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->id_det_en,
+ true);
+ if (ret)
+ goto out;
+
schedule_delayed_work(&rport->otg_sm_work,
OTG_SCHEDULE_DELAY * 3);
} else {
@@ -905,27 +928,40 @@ static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
if (!property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
return IRQ_NONE;
- mutex_lock(&rport->mutex);
-
/* clear bvalid detect irq pending status */
property_enable(rphy->grf, &rport->port_cfg->bvalid_det_clr, true);
- mutex_unlock(&rport->mutex);
-
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
return IRQ_HANDLED;
}
-static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
{
struct rockchip_usb2phy_port *rport = data;
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ bool id;
- if (property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
- return rockchip_usb2phy_bvalid_irq(irq, data);
- else
+ if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st))
return IRQ_NONE;
+
+ /* clear id detect irq pending status */
+ property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true);
+
+ id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+ extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+
+ ret |= rockchip_usb2phy_bvalid_irq(irq, data);
+ ret |= rockchip_usb2phy_id_irq(irq, data);
+
+ return ret;
}
static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
@@ -940,8 +976,14 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
if (!rport->phy)
continue;
- /* Handle linestate irq for both otg port and host port */
- ret = rockchip_usb2phy_linestate_irq(irq, rport);
+ switch (rport->port_id) {
+ case USB2PHY_PORT_OTG:
+ ret |= rockchip_usb2phy_otg_mux_irq(irq, rport);
+ break;
+ case USB2PHY_PORT_HOST:
+ ret |= rockchip_usb2phy_linestate_irq(irq, rport);
+ break;
+ }
}
return ret;
@@ -1015,6 +1057,25 @@ static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
"failed to request otg-bvalid irq handle\n");
return ret;
}
+
+ rport->id_irq = of_irq_get_byname(child_np, "otg-id");
+ if (rport->id_irq < 0) {
+ dev_err(rphy->dev, "no otg-id irq provided\n");
+ ret = rport->id_irq;
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->id_irq,
+ NULL,
+ rockchip_usb2phy_id_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_id",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-id irq handle\n");
+ return ret;
+ }
}
break;
default:
@@ -1139,8 +1200,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
else {
rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(rphy->grf))
- return PTR_ERR(rphy->grf);
+ if (IS_ERR(rphy->grf))
+ return PTR_ERR(rphy->grf);
}
if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
@@ -1289,10 +1350,14 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
.bvalid_det_en = { 0x0680, 3, 3, 0, 1 },
.bvalid_det_st = { 0x0690, 3, 3, 0, 1 },
.bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
+ .id_det_en = { 0x0680, 6, 5, 0, 3 },
+ .id_det_st = { 0x0690, 6, 5, 0, 3 },
+ .id_det_clr = { 0x06a0, 6, 5, 0, 3 },
.ls_det_en = { 0x0680, 2, 2, 0, 1 },
.ls_det_st = { 0x0690, 2, 2, 0, 1 },
.ls_det_clr = { 0x06a0, 2, 2, 0, 1 },
.utmi_bvalid = { 0x0480, 4, 4, 0, 1 },
+ .utmi_id = { 0x0480, 1, 1, 0, 1 },
.utmi_ls = { 0x0480, 3, 2, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1345,14 +1410,18 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0100, 8, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x3020, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x3024, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x3028, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x3020, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x3024, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x3028, 3, 2, 0, 3 },
+ .id_det_en = { 0x3020, 5, 4, 0, 3 },
+ .id_det_st = { 0x3024, 5, 4, 0, 3 },
+ .id_det_clr = { 0x3028, 5, 4, 0, 3 },
.ls_det_en = { 0x3020, 0, 0, 0, 1 },
.ls_det_st = { 0x3024, 0, 0, 0, 1 },
.ls_det_clr = { 0x3028, 0, 0, 0, 1 },
.utmi_avalid = { 0x0120, 10, 10, 0, 1 },
.utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_id = { 0x0120, 6, 6, 0, 1 },
.utmi_ls = { 0x0120, 5, 4, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1388,14 +1457,18 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0100, 15, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x0110, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x0114, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x0110, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x0114, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x0118, 3, 2, 0, 3 },
+ .id_det_en = { 0x0110, 5, 4, 0, 3 },
+ .id_det_st = { 0x0114, 5, 4, 0, 3 },
+ .id_det_clr = { 0x0118, 5, 4, 0, 3 },
.ls_det_en = { 0x0110, 0, 0, 0, 1 },
.ls_det_st = { 0x0114, 0, 0, 0, 1 },
.ls_det_clr = { 0x0118, 0, 0, 0, 1 },
.utmi_avalid = { 0x0120, 10, 10, 0, 1 },
.utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_id = { 0x0120, 6, 6, 0, 1 },
.utmi_ls = { 0x0120, 5, 4, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1453,8 +1526,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 },
.bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
+ .id_det_en = { 0xe3c0, 5, 4, 0, 3 },
+ .id_det_st = { 0xe3e0, 5, 4, 0, 3 },
+ .id_det_clr = { 0xe3d0, 5, 4, 0, 3 },
.utmi_avalid = { 0xe2ac, 7, 7, 0, 1 },
.utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 },
+ .utmi_id = { 0xe2ac, 8, 8, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe458, 1, 0, 0x2, 0x1 },
@@ -1488,8 +1565,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 },
.bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+ .id_det_en = { 0xe3c0, 10, 9, 0, 3 },
+ .id_det_st = { 0xe3e0, 10, 9, 0, 3 },
+ .id_det_clr = { 0xe3d0, 10, 9, 0, 3 },
.utmi_avalid = { 0xe2ac, 10, 10, 0, 1 },
.utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 },
+ .utmi_id = { 0xe2ac, 11, 11, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe468, 1, 0, 0x2, 0x1 },
@@ -1512,11 +1593,15 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0000, 8, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x0080, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x0084, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x0080, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x0084, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x0088, 3, 2, 0, 3 },
+ .id_det_en = { 0x0080, 5, 4, 0, 3 },
+ .id_det_st = { 0x0084, 5, 4, 0, 3 },
+ .id_det_clr = { 0x0088, 5, 4, 0, 3 },
.utmi_avalid = { 0x00c0, 10, 10, 0, 1 },
.utmi_bvalid = { 0x00c0, 9, 9, 0, 1 },
+ .utmi_id = { 0x00c0, 6, 6, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
/* Select suspend control from controller */
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index d2bbdc96a167..d76440ae10ff 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1105,15 +1105,14 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct resource *res;
const struct rockchip_usb3phy_port_cfg *phy_cfgs;
- const struct of_device_id *match;
int index, ret;
tcphy = devm_kzalloc(dev, sizeof(*tcphy), GFP_KERNEL);
if (!tcphy)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
+ phy_cfgs = of_device_get_match_data(dev);
+ if (!phy_cfgs) {
dev_err(dev, "phy configs are not assigned!\n");
return -EINVAL;
}
@@ -1123,7 +1122,6 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
if (IS_ERR(tcphy->base))
return PTR_ERR(tcphy->base);
- phy_cfgs = match->data;
/* find out a proper config which can be matched with dt. */
index = 0;
while (phy_cfgs[index].reg) {
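The conversion above drops the explicit of_match_device() call: of_device_get_match_data() returns the .data pointer of the matched of_device_id (or NULL) directly. A hedged sketch of the resulting probe pattern, with example_cfg/example_probe as illustrative names only:

/* Hedged kernel-style sketch; example_cfg/example_probe are illustrative
 * names, not part of this patch. */
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_cfg {
	unsigned int reg;
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_cfg *cfg;

	/* returns the .data of the matched of_device_id, or NULL */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -EINVAL;

	return 0;
}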
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 19746e658a6a..15219ed43ce9 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -865,17 +865,20 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->regmap_irqc = axp20x->regmap_irqc;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
- if (info->cable.edev == NULL) {
- dev_dbg(dev, "%s is not ready, probe deferred\n",
- AXP288_EXTCON_DEV_NAME);
- return -EPROBE_DEFER;
+ if (IS_ERR(info->cable.edev)) {
+ dev_err_probe(dev, PTR_ERR(info->cable.edev),
+ "extcon_get_extcon_dev(%s) failed\n",
+ AXP288_EXTCON_DEV_NAME);
+ return PTR_ERR(info->cable.edev);
}
if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) {
info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME);
- if (info->otg.cable == NULL) {
- dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
- return -EPROBE_DEFER;
+ if (IS_ERR(info->otg.cable)) {
+ dev_err_probe(dev, PTR_ERR(info->otg.cable),
+ "extcon_get_extcon_dev(%s) failed\n",
+ USB_HOST_EXTCON_NAME);
+ return PTR_ERR(info->otg.cable);
}
dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
}
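This and the following extcon consumers all follow the same conversion: extcon_get_extcon_dev() now reports errors as ERR_PTR() values (including -EPROBE_DEFER while the provider is not yet registered) rather than returning NULL, so callers simply propagate the error. A hedged sketch of the consumer pattern, with "example-extcon" as a placeholder name:

/* Hedged sketch of the new extcon consumer pattern; "example-extcon"
 * is a placeholder, not a real extcon device name. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>

static int example_get_cable(struct device *dev, struct extcon_dev **edev)
{
	*edev = extcon_get_extcon_dev("example-extcon");
	if (IS_ERR(*edev))
		return dev_err_probe(dev, PTR_ERR(*edev),
				     "extcon_get_extcon_dev() failed\n");
	return 0;
}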
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index d67edb760c94..92db79400a6a 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm,
cable->nb.notifier_call = charger_extcon_notifier;
cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name);
- if (IS_ERR_OR_NULL(cable->extcon_dev)) {
+ if (IS_ERR(cable->extcon_dev)) {
pr_err("Cannot find extcon_dev for %s (cable: %s)\n",
cable->extcon_name, cable->name);
- if (cable->extcon_dev == NULL)
- return -EPROBE_DEFER;
- else
- return PTR_ERR(cable->extcon_dev);
+ return PTR_ERR(cable->extcon_dev);
}
for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) {
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 127c73b0b3bd..1ec3535a257d 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -242,10 +242,10 @@ static int max8997_battery_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "couldn't get charger regulator\n");
}
charger->edev = extcon_get_extcon_dev("max8997-muic");
- if (IS_ERR_OR_NULL(charger->edev)) {
- if (!charger->edev)
- return -EPROBE_DEFER;
- dev_info(charger->dev, "couldn't get extcon device\n");
+ if (IS_ERR(charger->edev)) {
+ dev_err_probe(charger->dev, PTR_ERR(charger->edev),
+ "couldn't get extcon device: max8997-muic\n");
+ return PTR_ERR(charger->edev);
}
if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) {
diff --git a/drivers/power/supply/tosa_battery.c b/drivers/power/supply/tosa_battery.c
index 32cc31cd4761..73d4aca4c386 100644
--- a/drivers/power/supply/tosa_battery.c
+++ b/drivers/power/supply/tosa_battery.c
@@ -12,10 +12,9 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach-types.h>
-#include <mach/tosa.h>
static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
@@ -28,22 +27,23 @@ struct tosa_bat {
struct mutex work_lock; /* protects data */
bool (*is_present)(struct tosa_bat *bat);
- int gpio_full;
- int gpio_charge_off;
+ struct gpio_desc *gpiod_full;
+ struct gpio_desc *gpiod_charge_off;
int technology;
- int gpio_bat;
+ struct gpio_desc *gpiod_bat;
int adc_bat;
int adc_bat_divider;
int bat_max;
int bat_min;
- int gpio_temp;
+ struct gpio_desc *gpiod_temp;
int adc_temp;
int adc_temp_divider;
};
+static struct gpio_desc *jacket_detect;
static struct tosa_bat tosa_bat_main;
static struct tosa_bat tosa_bat_jacket;
@@ -51,15 +51,15 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_bat < 0 || bat->adc_bat < 0)
+ if (!bat->gpiod_bat || bat->adc_bat < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_bat, 1);
+ gpiod_set_value(bat->gpiod_bat, 1);
msleep(5);
value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
bat->adc_bat);
- gpio_set_value(bat->gpio_bat, 0);
+ gpiod_set_value(bat->gpiod_bat, 0);
mutex_unlock(&bat_lock);
value = value * 1000000 / bat->adc_bat_divider;
@@ -71,15 +71,15 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_temp < 0 || bat->adc_temp < 0)
+ if (!bat->gpiod_temp || bat->adc_temp < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_temp, 1);
+ gpiod_set_value(bat->gpiod_temp, 1);
msleep(5);
value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
bat->adc_temp);
- gpio_set_value(bat->gpio_temp, 0);
+ gpiod_set_value(bat->gpiod_temp, 0);
mutex_unlock(&bat_lock);
value = value * 10000 / bat->adc_temp_divider;
@@ -136,7 +136,7 @@ static int tosa_bat_get_property(struct power_supply *psy,
static bool tosa_jacket_bat_is_present(struct tosa_bat *bat)
{
- return gpio_get_value(TOSA_GPIO_JACKET_DETECT) == 0;
+ return gpiod_get_value(jacket_detect) == 0;
}
static void tosa_bat_external_power_changed(struct power_supply *psy)
@@ -166,23 +166,23 @@ static void tosa_bat_update(struct tosa_bat *bat)
bat->full_chrg = -1;
} else if (power_supply_am_i_supplied(psy)) {
if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- gpio_set_value(bat->gpio_charge_off, 0);
+ gpiod_set_value(bat->gpiod_charge_off, 0);
mdelay(15);
}
- if (gpio_get_value(bat->gpio_full)) {
+ if (gpiod_get_value(bat->gpiod_full)) {
if (old == POWER_SUPPLY_STATUS_CHARGING ||
bat->full_chrg == -1)
bat->full_chrg = tosa_read_bat(bat);
- gpio_set_value(bat->gpio_charge_off, 1);
+ gpiod_set_value(bat->gpiod_charge_off, 1);
bat->status = POWER_SUPPLY_STATUS_FULL;
} else {
- gpio_set_value(bat->gpio_charge_off, 0);
+ gpiod_set_value(bat->gpiod_charge_off, 0);
bat->status = POWER_SUPPLY_STATUS_CHARGING;
}
} else {
- gpio_set_value(bat->gpio_charge_off, 1);
+ gpiod_set_value(bat->gpiod_charge_off, 1);
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
@@ -251,18 +251,18 @@ static struct tosa_bat tosa_bat_main = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = TOSA_GPIO_BAT0_CRG,
- .gpio_charge_off = TOSA_GPIO_CHARGE_OFF,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = TOSA_GPIO_BAT0_V_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID3,
.adc_bat_divider = 414,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = TOSA_GPIO_BAT1_TH_ON,
+ .gpiod_temp = NULL,
.adc_temp = WM97XX_AUX_ID2,
.adc_temp_divider = 10000,
};
@@ -273,18 +273,18 @@ static struct tosa_bat tosa_bat_jacket = {
.psy = NULL,
.is_present = tosa_jacket_bat_is_present,
- .gpio_full = TOSA_GPIO_BAT1_CRG,
- .gpio_charge_off = TOSA_GPIO_CHARGE_OFF_JC,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = TOSA_GPIO_BAT1_V_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID3,
.adc_bat_divider = 414,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = TOSA_GPIO_BAT0_TH_ON,
+ .gpiod_temp = NULL,
.adc_temp = WM97XX_AUX_ID2,
.adc_temp_divider = 10000,
};
@@ -294,36 +294,20 @@ static struct tosa_bat tosa_bat_bu = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = -1,
- .gpio_charge_off = -1,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
- .gpio_bat = TOSA_GPIO_BU_CHRG_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID4,
.adc_bat_divider = 1266,
- .gpio_temp = -1,
+ .gpiod_temp = NULL,
.adc_temp = -1,
.adc_temp_divider = -1,
};
-static struct gpio tosa_bat_gpios[] = {
- { TOSA_GPIO_CHARGE_OFF, GPIOF_OUT_INIT_HIGH, "main charge off" },
- { TOSA_GPIO_CHARGE_OFF_JC, GPIOF_OUT_INIT_HIGH, "jacket charge off" },
- { TOSA_GPIO_BAT_SW_ON, GPIOF_OUT_INIT_LOW, "battery switch" },
- { TOSA_GPIO_BAT0_V_ON, GPIOF_OUT_INIT_LOW, "main battery" },
- { TOSA_GPIO_BAT1_V_ON, GPIOF_OUT_INIT_LOW, "jacket battery" },
- { TOSA_GPIO_BAT1_TH_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
- { TOSA_GPIO_BAT0_TH_ON, GPIOF_OUT_INIT_LOW, "jacket battery temp" },
- { TOSA_GPIO_BU_CHRG_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
- { TOSA_GPIO_BAT0_CRG, GPIOF_IN, "main battery full" },
- { TOSA_GPIO_BAT1_CRG, GPIOF_IN, "jacket battery full" },
- { TOSA_GPIO_BAT0_LOW, GPIOF_IN, "main battery low" },
- { TOSA_GPIO_BAT1_LOW, GPIOF_IN, "jacket battery low" },
- { TOSA_GPIO_JACKET_DETECT, GPIOF_IN, "jacket detect" },
-};
-
#ifdef CONFIG_PM
static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
{
@@ -343,19 +327,83 @@ static int tosa_bat_resume(struct platform_device *dev)
#define tosa_bat_resume NULL
#endif
-static int tosa_bat_probe(struct platform_device *dev)
+static int tosa_bat_probe(struct platform_device *pdev)
{
int ret;
struct power_supply_config main_psy_cfg = {},
jacket_psy_cfg = {},
bu_psy_cfg = {};
+ struct device *dev = &pdev->dev;
+ struct gpio_desc *dummy;
if (!machine_is_tosa())
return -ENODEV;
- ret = gpio_request_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
- if (ret)
- return ret;
+ /* Main charging control GPIOs */
+ tosa_bat_main.gpiod_charge_off = devm_gpiod_get(dev, "main charge off", GPIOD_OUT_HIGH);
+ if (IS_ERR(tosa_bat_main.gpiod_charge_off))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_charge_off),
+ "no main charger GPIO\n");
+ tosa_bat_jacket.gpiod_charge_off = devm_gpiod_get(dev, "jacket charge off", GPIOD_OUT_HIGH);
+ if (IS_ERR(tosa_bat_jacket.gpiod_charge_off))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_charge_off),
+ "no jacket charger GPIO\n");
+
+ /* Per-battery output check (routes battery voltage to ADC) */
+ tosa_bat_main.gpiod_bat = devm_gpiod_get(dev, "main battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_main.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_bat),
+ "no main battery GPIO\n");
+ tosa_bat_jacket.gpiod_bat = devm_gpiod_get(dev, "jacket battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_jacket.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_bat),
+ "no jacket battery GPIO\n");
+ tosa_bat_bu.gpiod_bat = devm_gpiod_get(dev, "backup battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_bu.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_bu.gpiod_bat),
+ "no backup battery GPIO\n");
+
+ /* Battery full detect GPIOs (using PXA SoC GPIOs) */
+ tosa_bat_main.gpiod_full = devm_gpiod_get(dev, "main battery full", GPIOD_IN);
+ if (IS_ERR(tosa_bat_main.gpiod_full))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_full),
+ "no main battery full GPIO\n");
+ tosa_bat_jacket.gpiod_full = devm_gpiod_get(dev, "jacket battery full", GPIOD_IN);
+ if (IS_ERR(tosa_bat_jacket.gpiod_full))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_full),
+ "no jacket battery full GPIO\n");
+
+ /* Battery temperature GPIOs (routes thermistor voltage to ADC) */
+ tosa_bat_main.gpiod_temp = devm_gpiod_get(dev, "main battery temp", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_main.gpiod_temp))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_temp),
+ "no main battery temp GPIO\n");
+ tosa_bat_jacket.gpiod_temp = devm_gpiod_get(dev, "jacket battery temp", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_jacket.gpiod_temp))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_temp),
+ "no jacket battery temp GPIO\n");
+
+ /* Jacket detect GPIO */
+ jacket_detect = devm_gpiod_get(dev, "jacket detect", GPIOD_IN);
+ if (IS_ERR(jacket_detect))
+ return dev_err_probe(dev, PTR_ERR(jacket_detect),
+ "no jacket detect GPIO\n");
+
+ /* Battery low indication GPIOs (not used, we just request them) */
+ dummy = devm_gpiod_get(dev, "main battery low", GPIOD_IN);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no main battery low GPIO\n");
+ dummy = devm_gpiod_get(dev, "jacket battery low", GPIOD_IN);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no jacket battery low GPIO\n");
+
+ /* Battery switch GPIO (not used, just requested) */
+ dummy = devm_gpiod_get(dev, "battery switch", GPIOD_OUT_LOW);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no battery switch GPIO\n");
mutex_init(&tosa_bat_main.work_lock);
mutex_init(&tosa_bat_jacket.work_lock);
@@ -363,7 +411,7 @@ static int tosa_bat_probe(struct platform_device *dev)
INIT_WORK(&bat_work, tosa_bat_work);
main_psy_cfg.drv_data = &tosa_bat_main;
- tosa_bat_main.psy = power_supply_register(&dev->dev,
+ tosa_bat_main.psy = power_supply_register(dev,
&tosa_bat_main_desc,
&main_psy_cfg);
if (IS_ERR(tosa_bat_main.psy)) {
@@ -372,7 +420,7 @@ static int tosa_bat_probe(struct platform_device *dev)
}
jacket_psy_cfg.drv_data = &tosa_bat_jacket;
- tosa_bat_jacket.psy = power_supply_register(&dev->dev,
+ tosa_bat_jacket.psy = power_supply_register(dev,
&tosa_bat_jacket_desc,
&jacket_psy_cfg);
if (IS_ERR(tosa_bat_jacket.psy)) {
@@ -381,28 +429,28 @@ static int tosa_bat_probe(struct platform_device *dev)
}
bu_psy_cfg.drv_data = &tosa_bat_bu;
- tosa_bat_bu.psy = power_supply_register(&dev->dev, &tosa_bat_bu_desc,
+ tosa_bat_bu.psy = power_supply_register(dev, &tosa_bat_bu_desc,
&bu_psy_cfg);
if (IS_ERR(tosa_bat_bu.psy)) {
ret = PTR_ERR(tosa_bat_bu.psy);
goto err_psy_reg_bu;
}
- ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG),
+ ret = request_irq(gpiod_to_irq(tosa_bat_main.gpiod_full),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"main full", &tosa_bat_main);
if (ret)
goto err_req_main;
- ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG),
+ ret = request_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"jacket full", &tosa_bat_jacket);
if (ret)
goto err_req_jacket;
- ret = request_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT),
+ ret = request_irq(gpiod_to_irq(jacket_detect),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"jacket detect", &tosa_bat_jacket);
@@ -411,9 +459,9 @@ static int tosa_bat_probe(struct platform_device *dev)
return 0;
}
- free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
err_req_jacket:
- free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+ free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
err_req_main:
power_supply_unregister(tosa_bat_bu.psy);
err_psy_reg_bu:
@@ -425,15 +473,14 @@ err_psy_reg_main:
/* see comment in tosa_bat_remove */
cancel_work_sync(&bat_work);
- gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return ret;
}
static int tosa_bat_remove(struct platform_device *dev)
{
- free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
- free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
- free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+ free_irq(gpiod_to_irq(jacket_detect), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
power_supply_unregister(tosa_bat_bu.psy);
power_supply_unregister(tosa_bat_jacket.psy);
@@ -445,7 +492,6 @@ static int tosa_bat_remove(struct platform_device *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
- gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return 0;
}
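After the descriptor conversion, the driver looks its lines up by consumer name ("main charge off", "jacket detect", ...), which presumes the Tosa board file registers a matching gpiod lookup table. A hedged sketch of such a table follows; the chip label, hardware pin numbers and device name are placeholders, not taken from this patch:

/* Hedged sketch only: chip label, pin numbers and dev_id below are
 * placeholders; the real lookup table lives in the Tosa board file. */
#include <linux/gpio/machine.h>
#include <linux/init.h>

static struct gpiod_lookup_table tosa_battery_gpiod_table = {
	.dev_id = "wm97xx-battery",		/* assumed platform device name */
	.table = {
		GPIO_LOOKUP("gpio-pxa", 0, "main charge off", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpio-pxa", 1, "jacket charge off", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpio-pxa", 2, "main battery", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpio-pxa", 3, "jacket detect", GPIO_ACTIVE_HIGH),
		{ },
	},
};

/* registered once at board init time so devm_gpiod_get() can resolve */
static void __init tosa_battery_gpio_init(void)
{
	gpiod_add_lookup_table(&tosa_battery_gpiod_table);
}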
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 79368a957d89..290c1f02da10 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -400,7 +400,8 @@ field##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t sz) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
- char *new, *old; \
+ const char *old; \
+ char *new; \
\
new = kstrndup(buf, sz, GFP_KERNEL); \
if (!new) \
@@ -592,24 +593,51 @@ static struct bus_type rpmsg_bus = {
.remove = rpmsg_dev_remove,
};
-int rpmsg_register_device(struct rpmsg_device *rpdev)
+/*
+ * A helper for registering an rpmsg device with a driver override and name.
+ * Drivers should not use it directly; use rpmsg_register_device() instead.
+ */
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
{
struct device *dev = &rpdev->dev;
int ret;
- dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+ if (driver_override)
+ strcpy(rpdev->id.name, driver_override);
+
+ dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
- rpdev->dev.bus = &rpmsg_bus;
+ dev->bus = &rpmsg_bus;
- ret = device_register(&rpdev->dev);
+ device_initialize(dev);
+ if (driver_override) {
+ ret = driver_set_override(dev, &rpdev->driver_override,
+ driver_override,
+ strlen(driver_override));
+ if (ret) {
+ dev_err(dev, "device_set_override failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = device_add(dev);
if (ret) {
- dev_err(dev, "device_register failed: %d\n", ret);
- put_device(&rpdev->dev);
+ dev_err(dev, "device_add failed: %d\n", ret);
+ kfree(rpdev->driver_override);
+ rpdev->driver_override = NULL;
+ put_device(dev);
}
return ret;
}
+EXPORT_SYMBOL(rpmsg_register_device_override);
+
+int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+ return rpmsg_register_device_override(rpdev, NULL);
+}
EXPORT_SYMBOL(rpmsg_register_device);
/*
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index d4b23fd019a8..a22cd4abe7d1 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -94,10 +94,7 @@ int rpmsg_release_channel(struct rpmsg_device *rpdev,
*/
static inline int rpmsg_ctrldev_register_device(struct rpmsg_device *rpdev)
{
- strcpy(rpdev->id.name, "rpmsg_ctrl");
- rpdev->driver_override = "rpmsg_ctrl";
-
- return rpmsg_register_device(rpdev);
+ return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");
}
#endif
diff --git a/drivers/rpmsg/rpmsg_ns.c b/drivers/rpmsg/rpmsg_ns.c
index 762ff1ae279f..c70ad03ff2e9 100644
--- a/drivers/rpmsg/rpmsg_ns.c
+++ b/drivers/rpmsg/rpmsg_ns.c
@@ -20,12 +20,10 @@
*/
int rpmsg_ns_register_device(struct rpmsg_device *rpdev)
{
- strcpy(rpdev->id.name, "rpmsg_ns");
- rpdev->driver_override = "rpmsg_ns";
rpdev->src = RPMSG_NS_ADDR;
rpdev->dst = RPMSG_NS_ADDR;
- return rpmsg_register_device(rpdev);
+ return rpmsg_register_device_override(rpdev, "rpmsg_ns");
}
EXPORT_SYMBOL(rpmsg_ns_register_device);
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index cf8119b6d320..eeacf480cf36 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -16,8 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <mach/hardware.h>
-
#include "rtc-sa1100.h"
#define RTC_DEF_DIVIDER (32768 - 1)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 1cb9daf9c645..fa8df50bb49e 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -103,7 +103,11 @@ struct subchannel {
struct work_struct todo_work;
struct schib_config config;
u64 dma_mask;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fa8293335077..913b6ddd040b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -338,31 +338,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct subchannel *sch = to_subchannel(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = sch->driver_override;
- if (strlen(driver_override)) {
- sch->driver_override = driver_override;
- } else {
- kfree(driver_override);
- sch->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &sch->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index d35e7a3f7067..97e51c34e6cf 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -62,6 +62,7 @@ struct virtio_ccw_device {
unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
+ rwlock_t irq_lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
bool is_thinint;
@@ -970,6 +971,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->flags = 0;
ccw->count = sizeof(status);
ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
+ /* We use ssch for setting the status, which is a serializing
+ * instruction that guarantees the memory writes have
+ * completed before ssch executes.
+ */
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
@@ -984,6 +989,30 @@ static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
return dev_name(&vcdev->cdev->dev);
}
+static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct airq_info *info = vcdev->airq_info;
+
+ if (info) {
+ /*
+ * This device uses adapter interrupts: synchronize with
+ * vring_interrupt() called by virtio_airq_handler()
+ * via the indicator area lock.
+ */
+ write_lock_irq(&info->lock);
+ write_unlock_irq(&info->lock);
+ } else {
+ /* This device uses classic interrupts: synchronize
+ * with vring_interrupt() called by
+ * virtio_ccw_int_handler() via the per-device
+ * irq_lock
+ */
+ write_lock_irq(&vcdev->irq_lock);
+ write_unlock_irq(&vcdev->irq_lock);
+ }
+}
+
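The empty write_lock_irq()/write_unlock_irq() pairs above act as a wait-for-readers barrier: once the writer side has been taken, any vring_interrupt() that was running under the corresponding read lock has finished. A small userspace sketch of the same pattern using pthread rwlocks; the helper names are illustrative only:

/* Userspace sketch of the "lock then immediately unlock" barrier used
 * above; helper names are illustrative, not kernel APIs. */
#include <pthread.h>

static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

/* interrupt path: callbacks run with the reader side held */
static void run_callback(void (*cb)(void))
{
	pthread_rwlock_rdlock(&cb_lock);
	cb();
	pthread_rwlock_unlock(&cb_lock);
}

/* teardown path: returns only after every in-flight callback finished */
static void synchronize_callbacks(void)
{
	pthread_rwlock_wrlock(&cb_lock);
	pthread_rwlock_unlock(&cb_lock);
}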
static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
@@ -995,6 +1024,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
.find_vqs = virtio_ccw_find_vqs,
.del_vqs = virtio_ccw_del_vqs,
.bus_name = virtio_ccw_bus_name,
+ .synchronize_cbs = virtio_ccw_synchronize_cbs,
};
@@ -1106,6 +1136,8 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
+ /* Interrupts are disabled here */
+ read_lock(&vcdev->irq_lock);
for_each_set_bit(i, indicators(vcdev),
sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
@@ -1114,6 +1146,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
+ read_unlock(&vcdev->irq_lock);
if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
clear_bit(0, indicators2(vcdev));
@@ -1284,6 +1317,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ rwlock_init(&vcdev->irq_lock);
mutex_init(&vcdev->io_lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6e3a04107bb6..a9fe5152addd 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -500,7 +500,6 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
-source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 19814c26c908..2ad3bc052531 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -101,7 +101,6 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
-obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 429d64299fe9..f910e2553fbb 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -232,7 +232,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
*/
rq->req_stat = RS_PENDING;
if (test_bit(AF_DEGRADED_MODE, &a->flags))
- /* not suppported for now */;
+ /* not supported for now */;
else
build_flash_msg(a, rq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ac17e3a35d2c..6370cdbfba08 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2182,7 +2182,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
u16 len = sci_req_tx_bytes(ireq);
- /* likely non-error data underrrun, workaround missing
+ /* likely non-error data underrun, workaround missing
* d2h frame from the controller
*/
if (d2h->fis_type != FIS_REGD2H) {
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 092a971d066b..bbd1faf41e80 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
- lpfc_nvme.o lpfc_nvmet.o
+ lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b0775be31d5c..b1be0dd0337a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -671,6 +671,9 @@ int lpfc_vmid_cmd(struct lpfc_vport *vport,
int lpfc_vmid_hash_fn(const char *vmid, int len);
struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 748c53219986..7b8cf678abb5 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1736,6 +1736,28 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+#define PCI_VENDOR_ID_ATTO 0x117c
+#define PCI_DEVICE_ID_CLRY_16XE 0x0064
+#define PCI_DEVICE_ID_CLRY_161E 0x0063
+#define PCI_DEVICE_ID_CLRY_162E 0x0064
+#define PCI_DEVICE_ID_CLRY_164E 0x0065
+#define PCI_DEVICE_ID_CLRY_16XP 0x0094
+#define PCI_DEVICE_ID_CLRY_161P 0x00a0
+#define PCI_DEVICE_ID_CLRY_162P 0x0094
+#define PCI_DEVICE_ID_CLRY_164P 0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE 0x0094
+#define PCI_DEVICE_ID_CLRY_321E 0x00a2
+#define PCI_DEVICE_ID_CLRY_322E 0x00a3
+#define PCI_DEVICE_ID_CLRY_324E 0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP 0x00bb
+#define PCI_DEVICE_ID_CLRY_321P 0x00bc
+#define PCI_DEVICE_ID_CLRY_322P 0x00bd
+#define PCI_DEVICE_ID_CLRY_324P 0x00be
+#define PCI_DEVICE_ID_TLFC_2 0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2 0x4064
+#define PCI_DEVICE_ID_TLFC_3 0x0094
+#define PCI_DEVICE_ID_TLFC_3162 0x40a6
+#define PCI_DEVICE_ID_TLFC_3322 0x40a7
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
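The lpfc_id_table entries added below match on the subsystem vendor/device pair as well, because several ATTO boards share a primary device ID (0x0064 covers Celerity 16XE and ThunderLink 2; 0x0094 covers 16XP, 32XE and ThunderLink 3). The four explicit initializers correspond to what PCI_DEVICE_SUB() fills in; a hedged one-entry sketch using the constants defined above:

/* Hedged sketch: one table entry written with PCI_DEVICE_SUB(), which
 * expands to the same vendor/device/subvendor/subdevice quadruple as
 * the open-coded initializers added below. */
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
			 PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E) },
	{ 0 }
};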
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 6a90e6e53d09..a1b9be245560 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -124,5 +124,35 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2bffaa681fcc..93b94c64518d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2415,6 +2415,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
}
/**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+ char *model = "<Unknown>";
+ int tbolt = 0;
+
+ switch (sub_dev_id) {
+ case PCI_DEVICE_ID_CLRY_161E:
+ model = "161E";
+ break;
+ case PCI_DEVICE_ID_CLRY_162E:
+ model = "162E";
+ break;
+ case PCI_DEVICE_ID_CLRY_164E:
+ model = "164E";
+ break;
+ case PCI_DEVICE_ID_CLRY_161P:
+ model = "161P";
+ break;
+ case PCI_DEVICE_ID_CLRY_162P:
+ model = "162P";
+ break;
+ case PCI_DEVICE_ID_CLRY_164P:
+ model = "164P";
+ break;
+ case PCI_DEVICE_ID_CLRY_321E:
+ model = "321E";
+ break;
+ case PCI_DEVICE_ID_CLRY_322E:
+ model = "322E";
+ break;
+ case PCI_DEVICE_ID_CLRY_324E:
+ model = "324E";
+ break;
+ case PCI_DEVICE_ID_CLRY_321P:
+ model = "321P";
+ break;
+ case PCI_DEVICE_ID_CLRY_322P:
+ model = "322P";
+ break;
+ case PCI_DEVICE_ID_CLRY_324P:
+ model = "324P";
+ break;
+ case PCI_DEVICE_ID_TLFC_2XX2:
+ model = "2XX2";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3162:
+ model = "3162";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3322:
+ model = "3322";
+ tbolt = 1;
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79, "%s", model);
+
+ if (descp && descp[0] == '\0')
+ snprintf(descp, 255,
+ "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+ (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+ model,
+ phba->Port);
+}
+
+/**
* lpfc_get_hba_model_desc - Retrieve HBA device model name and description
* @phba: pointer to lpfc hba data structure.
* @mdp: pointer to the data structure to hold the derived model name.
@@ -2444,6 +2528,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
+ if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+ lpfc_get_atto_model_desc(phba, mdp, descp);
+ return;
+ }
+
if (phba->lmt & LMT_64Gb)
max_speed = 64;
else if (phba->lmt & LMT_32Gb)
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 5385f4de5523..335e90633933 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1279,6 +1279,19 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Words 13 14 15 are for PBDE support */
+ /* add the VMID tags as per switch response */
+ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+ }
+ }
+
pwqeq->vport = vport;
return 0;
}
@@ -1504,6 +1517,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u8 *uuid = NULL;
+ int err;
+ enum dma_data_direction iodir;
+#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1662,6 +1680,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->qidx = lpfc_queue_info->qidx;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* is the I/O generated by a VM, get the associated virtual */
+ /* entity id */
+ uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+ if (uuid) {
+ if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+ iodir = DMA_TO_DEVICE;
+ else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+ iodir = DMA_FROM_DEVICE;
+ else
+ iodir = DMA_NONE;
+
+ err = lpfc_vmid_get_appid(vport, uuid, iodir,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_ncmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+ }
+ }
+#endif
+
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identifier was created in our hardware queue create callback
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3b8afa9d3056..d43968203248 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -87,14 +87,6 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp);
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag);
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid);
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
@@ -5271,254 +5263,6 @@ void lpfc_poll_timeout(struct timer_list *t)
}
/*
- * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash: calculated hash value
- * @buf: uuid associated with the VE
- * Return the VMID entry associated with the UUID
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
- u32 hash, u8 *buf)
-{
- struct lpfc_vmid *vmp;
-
- hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
- if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
- return vmp;
- }
- return NULL;
-}
-
-/*
- * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash - calculated hash value
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- *
- * This routine will insert the newly acquired VMID entity in the hash table.
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp)
-{
- hash_add(vport->hash_table, &vmp->hnode, hash);
-}
-
-/*
- * lpfc_vmid_hash_fn - create a hash value of the UUID
- * @vmid: uuid associated with the VE
- * @len: length of the VMID string
- * Returns the calculated hash value
- */
-int lpfc_vmid_hash_fn(const char *vmid, int len)
-{
- int c;
- int hash = 0;
-
- if (len == 0)
- return 0;
- while (len--) {
- c = *vmid++;
- if (c >= 'A' && c <= 'Z')
- c += 'a' - 'A';
-
- hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
- (c >> LPFC_VMID_HASH_SHIFT)) * 19;
- }
-
- return hash & LPFC_VMID_HASH_MASK;
-}
-
-/*
- * lpfc_vmid_update_entry - update the vmid entry in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @cmd: address of scsi cmd descriptor
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- * @tag: VMID tag
- */
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag)
-{
- u64 *lta;
-
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
- else if (vport->phba->cfg_vmid_app_header)
- tag->app_id = vmp->un.app_id;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- vmp->io_wr_cnt++;
- else
- vmp->io_rd_cnt++;
-
- /* update the last access timestamp in the table */
- lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
- *lta = jiffies;
-}
-
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid)
-{
- u32 hash;
- struct lpfc_vmid *pvmid;
-
- if (vport->port_type == LPFC_PHYSICAL_PORT) {
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- } else {
- hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
- pvmid =
- lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
- vmid->host_vmid);
- if (pvmid)
- vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
- else
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- }
-}
-
-/*
- * lpfc_vmid_get_appid - get the VMID associated with the UUID
- * @vport: The virtual port for which this call is being executed.
- * @uuid: UUID associated with the VE
- * @cmd: address of scsi_cmd descriptor
- * @tag: VMID tag
- * Returns status of the function
- */
-static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
- scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
-{
- struct lpfc_vmid *vmp = NULL;
- int hash, len, rc = -EPERM, i;
-
- /* check if QFPA is complete */
- if (lpfc_vmid_is_type_priority_tag(vport) &&
- !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
- (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
- vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
- return -EAGAIN;
- }
-
- /* search if the UUID has already been mapped to the VMID */
- len = strlen(uuid);
- hash = lpfc_vmid_hash_fn(uuid, len);
-
- /* search for the VMID in the table */
- read_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* if found, check if its already registered */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- read_unlock(&vport->vmid_lock);
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- rc = 0;
- } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
- vmp->flag & LPFC_VMID_DE_REGISTER)) {
- /* else if register or dereg request has already been sent */
- /* Hence VMID tag will not be added for this I/O */
- read_unlock(&vport->vmid_lock);
- rc = -EBUSY;
- } else {
- /* The VMID was not found in the hashtable. At this point, */
- /* drop the read lock first before proceeding further */
- read_unlock(&vport->vmid_lock);
- /* start the process to obtain one as per the */
- /* type of the VMID indicated */
- write_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* while the read lock was released, in case the entry was */
- /* added by other context or is in process of being added */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- write_unlock(&vport->vmid_lock);
- return 0;
- } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
- write_unlock(&vport->vmid_lock);
- return -EBUSY;
- }
-
- /* else search and allocate a free slot in the hash table */
- if (vport->cur_vmid_cnt < vport->max_vmid) {
- for (i = 0; i < vport->max_vmid; i++) {
- vmp = vport->vmid + i;
- if (vmp->flag == LPFC_VMID_SLOT_FREE)
- break;
- }
- if (i == vport->max_vmid)
- vmp = NULL;
- } else {
- vmp = NULL;
- }
-
- if (!vmp) {
- write_unlock(&vport->vmid_lock);
- return -ENOMEM;
- }
-
- /* Add the vmid and register */
- lpfc_put_vmid_in_hashtable(vport, hash, vmp);
- vmp->vmid_len = len;
- memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
- vmp->io_rd_cnt = 0;
- vmp->io_wr_cnt = 0;
- vmp->flag = LPFC_VMID_SLOT_USED;
-
- vmp->delete_inactive =
- vport->vmid_inactivity_timeout ? 1 : 0;
-
- /* if type priority tag, get next available VMID */
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- lpfc_vmid_assign_cs_ctl(vport, vmp);
-
- /* allocate the per cpu variable for holding */
- /* the last access time stamp only if VMID is enabled */
- if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
- if (!vmp->last_io_time) {
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- write_unlock(&vport->vmid_lock);
-
- /* complete transaction with switch */
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- rc = lpfc_vmid_uvem(vport, vmp, true);
- else if (vport->phba->cfg_vmid_app_header)
- rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
- if (!rc) {
- write_lock(&vport->vmid_lock);
- vport->cur_vmid_cnt++;
- vmp->flag |= LPFC_VMID_REQ_REGISTER;
- write_unlock(&vport->vmid_lock);
- } else {
- write_lock(&vport->vmid_lock);
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- free_percpu(vmp->last_io_time);
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- /* finally, enable the idle timer once */
- if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
- mod_timer(&vport->phba->inactive_vmid_poll,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
- vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
- }
- }
- return rc;
-}
-
-/*
* lpfc_is_command_vm_io - get the UUID from blk cgroup
* @cmd: Pointer to scsi_cmnd data structure
* Returns UUID if present, otherwise NULL
@@ -5704,9 +5448,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
uuid = lpfc_is_command_vm_io(cmnd);
if (uuid) {
- err = lpfc_vmid_get_appid(vport, uuid, cmnd,
- (union lpfc_vmid_io_tag *)
- &cur_iocbq->vmid_tag);
+ err = lpfc_vmid_get_appid(vport, uuid,
+ cmnd->sc_data_direction,
+ (union lpfc_vmid_io_tag *)
+ &cur_iocbq->vmid_tag);
if (!err)
cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644
index 000000000000..f64ced04b912
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -0,0 +1,288 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash - calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ enum dma_data_direction iodir,
+ struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else if (vport->phba->cfg_vmid_app_header)
+ tag->app_id = vmp->un.app_id;
+
+ if (iodir == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else if (iodir == DMA_FROM_DEVICE)
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @iodir: io direction
+ * @tag: VMID tag
+ * Returns status of the function
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = -EPERM, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+ (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if its already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* else if register or dereg request has already been sent */
+ /* Hence VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. At this point, */
+ /* drop the read lock first before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* start the process to obtain one as per the */
+ /* type of the VMID indicated */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* while the read lock was released, in case the entry was */
+ /* added by other context or is in process of being added */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per cpu variable for holding */
+ /* the last access time stamp only if VMID is enabled */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = __alloc_percpu(sizeof(u64),
+ __alignof__(struct
+ lpfc_vmid));
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else if (vport->phba->cfg_vmid_app_header)
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 01cd01787b0f..0e1cb4aa4ca2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -954,7 +954,7 @@ struct mpi3mr_ioc {
u16 active_poll_qcount;
u16 requested_poll_qcount;
- struct device *bsg_dev;
+ struct device bsg_dev;
struct request_queue *bsg_queue;
u8 stop_bsgs;
u8 *logdata_buf;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 9ab1762468ad..9baac224b213 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -1487,28 +1487,28 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
*/
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+
if (!mrioc->bsg_queue)
return;
bsg_remove_queue(mrioc->bsg_queue);
mrioc->bsg_queue = NULL;
- device_del(mrioc->bsg_dev);
- put_device(mrioc->bsg_dev);
- kfree(mrioc->bsg_dev);
+ device_del(bsg_dev);
+ put_device(bsg_dev);
}
/**
* mpi3mr_bsg_node_release -release bsg device node
* @dev: bsg device node
*
- * decrements bsg dev reference count
+ * decrements bsg dev parent reference count
*
* Return:Nothing
*/
static void mpi3mr_bsg_node_release(struct device *dev)
{
- put_device(dev);
+ put_device(dev->parent);
}
/**
@@ -1521,41 +1521,37 @@ static void mpi3mr_bsg_node_release(struct device *dev)
*/
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
- mrioc->bsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!mrioc->bsg_dev) {
- ioc_err(mrioc, "bsg device mem allocation failed\n");
- return;
- }
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ struct device *parent = &mrioc->shost->shost_gendev;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = mpi3mr_bsg_node_release;
- device_initialize(mrioc->bsg_dev);
- dev_set_name(mrioc->bsg_dev, "mpi3mrctl%u", mrioc->id);
+ dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
- if (device_add(mrioc->bsg_dev)) {
+ if (device_add(bsg_dev)) {
ioc_err(mrioc, "%s: bsg device add failed\n",
- dev_name(mrioc->bsg_dev));
- goto err_device_add;
+ dev_name(bsg_dev));
+ put_device(bsg_dev);
+ return;
}
- mrioc->bsg_dev->release = mpi3mr_bsg_node_release;
-
- mrioc->bsg_queue = bsg_setup_queue(mrioc->bsg_dev, dev_name(mrioc->bsg_dev),
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
mpi3mr_bsg_request, NULL, 0);
if (IS_ERR(mrioc->bsg_queue)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
- dev_name(mrioc->bsg_dev));
- goto err_setup_queue;
+ dev_name(bsg_dev));
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+ return;
}
blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
return;
-
-err_setup_queue:
- device_del(mrioc->bsg_dev);
- put_device(mrioc->bsg_dev);
-err_device_add:
- kfree(mrioc->bsg_dev);
}
/**
@@ -1693,7 +1689,7 @@ logging_level_store(struct device *dev,
static DEVICE_ATTR_RW(logging_level);
/**
- * adapter_state_show - SysFS callback for adapter state show
+ * adp_state_show() - SysFS callback for adapter state show
* @dev: class device
* @attr: Device attributes
* @buf: Buffer to copy
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 71585528e8db..e885c1dbf61f 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1239,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -3413,9 +3414,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 3d5cd337a2a6..bfce60183a6e 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1434,7 +1434,7 @@ static int pmcraid_notify_aen(
return -EINVAL;
}
- /* send genetlink multicast message to notify appplications */
+ /* send genetlink multicast message to notify applications */
genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&pmcraid_event_family, skb,
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e57cc22453d0..4750ec5789a8 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -893,7 +893,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- /* Record LUN number for later use if we neeed them */
+ /* Record LUN number for later use if we need it */
io_req->lun = (int)sc_cmd->device->lun;
/* Obtain free SQE */
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 0ab595c0870a..1e7f4d138e06 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4037,7 +4037,6 @@ qla1280_setup(char *s)
{
char *cp, *ptr;
unsigned long val;
- int toke;
cp = s;
@@ -4052,7 +4051,7 @@ qla1280_setup(char *s)
} else
val = simple_strtoul(ptr, &ptr, 0);
- switch ((toke = qla1280_get_token(cp))) {
+ switch (qla1280_get_token(cp)) {
case TOKEN_NVRAM:
if (!val)
driver_setup.no_nvram = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index e6b5c4ccce97..346d47b61c07 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -591,7 +591,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
}
kfree(req->outstanding_cmds);
kfree(req);
- req = NULL;
}
static void
@@ -617,7 +616,6 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
- rsp = NULL;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a02235a6a8e9..cb97f625970d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -48,13 +48,6 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 0;
-module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
- "For Dual Mode (qlini_mode=dual), this parameter determines "
- "the percentage of exchanges/cmds FW will allocate resources "
- "for Target mode.");
-
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1..49ef864df581 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
scmd->cmnd[5] = 0;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ scmd->allowed = 5;
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- scmd->allowed = 5;
+ req->end_io = eh_lock_door_done;
- blk_execute_rq_nowait(req, true, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9db7da0c79c..6ffc9e4258a8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -779,7 +779,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_AGAIN;
+ blk_stat = BLK_STS_TRANSPORT;
fallthrough;
default:
action = ACTION_FAIL;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 546a9e3cfbec..43949798a2e4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -573,7 +573,6 @@ struct bus_type scsi_bus_type = {
.pm = &scsi_bus_pm_ops,
#endif
};
-EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 749316462075..895b56c8f25e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3521,7 +3521,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
put_device(&sdkp->disk_dev);
- blk_cleanup_disk(gd);
+ put_disk(gd);
goto out;
}
@@ -3542,7 +3542,6 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
- sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
@@ -3579,7 +3578,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
ida_free(&sd_index_ida, sdkp->index);
- sd_zbc_release_disk(sdkp);
+ sd_zbc_free_zone_info(sdkp);
put_device(&sdkp->device->sdev_gendev);
free_opal_dev(sdkp->opal_dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2abad54fd23f..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -241,7 +241,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
-void sd_zbc_release_disk(struct scsi_disk *sdkp);
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
@@ -256,7 +256,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
+static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 5b9fad70aa88..6acc4f406eb8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -786,8 +786,11 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
return 0;
}
-static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
{
+ if (!sdkp->zone_wp_update_buf)
+ return;
+
/* Serialize against revalidate zones */
mutex_lock(&sdkp->rev_mutex);
@@ -802,12 +805,6 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
mutex_unlock(&sdkp->rev_mutex);
}
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
-{
- if (sd_is_zoned(sdkp))
- sd_zbc_clear_zone_info(sdkp);
-}
-
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
@@ -914,12 +911,15 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
u32 zone_blocks = 0;
int ret;
- if (!sd_is_zoned(sdkp))
+ if (!sd_is_zoned(sdkp)) {
/*
- * Device managed or normal SCSI disk,
- * no special handling required
+ * Device managed or normal SCSI disk, no special handling
+ * required. Nevertheless, free the disk zone information in
+ * case the device type changed.
*/
+ sd_zbc_free_zone_info(sdkp);
return 0;
+ }
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
@@ -928,11 +928,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
if (!blk_queue_is_zoned(q)) {
/*
* This can happen for a host aware disk with partitions.
- * The block device zone information was already cleared
- * by blk_queue_set_zoned(). Only clear the scsi disk zone
+ * The block device zone model was already cleared by
+ * blk_queue_set_zoned(). Only free the scsi disk zone
* information and exit early.
*/
- sd_zbc_clear_zone_info(sdkp);
+ sd_zbc_free_zone_info(sdkp);
return 0;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3..118c7b4a8af2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+ srp->rq->end_io = sg_rq_end_io;
+ blk_execute_rq_nowait(srp->rq, at_head);
return 0;
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index c4c48272d8ad..2e40320129c0 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1082,7 +1082,7 @@ struct pqi_stream_data {
};
struct pqi_scsi_dev {
- int devtype; /* as reported by INQUIRY commmand */
+ int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
/* only valid for devtype = TYPE_DISK */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b92..850172a2b8f1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
req->timeout = timeout;
scmd->allowed = retries;
+ req->end_io = st_scsi_execute_end;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true);
return 0;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 08ed059a738b..ca3530982e52 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -479,7 +479,7 @@ static void storvsc_host_scan(struct work_struct *work)
host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
- * currrently known devices have been hot removed. We issue a
+ * currently known devices have been hot removed. We issue a
* "unit ready" command against all currently known devices.
* This I/O will result in an error for devices that have been
* removed. As part of handling the I/O error, we remove the device.
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
deleted file mode 100644
index 1bba3fead2ce..000000000000
--- a/drivers/scsi/ufs/ufs.h
+++ /dev/null
@@ -1,623 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFS_H
-#define _UFS_H
-
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/scsi/scsi_bsg_ufs.h>
-
-#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
-#define QUERY_DESC_MAX_SIZE 255
-#define QUERY_DESC_MIN_SIZE 2
-#define QUERY_DESC_HDR_SIZE 2
-#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
- (sizeof(struct utp_upiu_header)))
-#define UFS_SENSE_SIZE 18
-
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
-/*
- * UFS device may have standard LUs and LUN id could be from 0x00 to
- * 0x7F. Standard LUs use "Peripheral Device Addressing Format".
- * UFS device may also have the Well Known LUs (also referred as W-LU)
- * which again could be from 0x00 to 0x7F. For W-LUs, device only use
- * the "Extended Addressing Format" which means the W-LUNs would be
- * from 0xc100 (SCSI_W_LUN_BASE) onwards.
- * This means max. LUN number reported from UFS device could be 0xC17F.
- */
-#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
-#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
-#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_RPMB_UNIT 0xC4
-
-/* WriteBooster buffer is available only for the logical unit from 0 to 7 */
-#define UFS_UPIU_MAX_WB_LUN_ID 8
-
-/*
- * WriteBooster buffer lifetime has a limit setted by vendor.
- * If it is over the limit, WriteBooster feature will be disabled.
- */
-#define UFS_WB_EXCEED_LIFETIME 0x0B
-
-/* Well known logical unit id in LUN field of UPIU */
-enum {
- UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
- UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
- UFS_UPIU_BOOT_WLUN = 0xB0,
- UFS_UPIU_RPMB_WLUN = 0xC4,
-};
-
-/*
- * UFS Protocol Information Unit related definitions
- */
-
-/* Task management functions */
-enum {
- UFS_ABORT_TASK = 0x01,
- UFS_ABORT_TASK_SET = 0x02,
- UFS_CLEAR_TASK_SET = 0x04,
- UFS_LOGICAL_RESET = 0x08,
- UFS_QUERY_TASK = 0x80,
- UFS_QUERY_TASK_SET = 0x81,
-};
-
-/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
- UPIU_TRANSACTION_NOP_OUT = 0x00,
- UPIU_TRANSACTION_COMMAND = 0x01,
- UPIU_TRANSACTION_DATA_OUT = 0x02,
- UPIU_TRANSACTION_TASK_REQ = 0x04,
- UPIU_TRANSACTION_QUERY_REQ = 0x16,
-};
-
-/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
- UPIU_TRANSACTION_NOP_IN = 0x20,
- UPIU_TRANSACTION_RESPONSE = 0x21,
- UPIU_TRANSACTION_DATA_IN = 0x22,
- UPIU_TRANSACTION_TASK_RSP = 0x24,
- UPIU_TRANSACTION_READY_XFER = 0x31,
- UPIU_TRANSACTION_QUERY_RSP = 0x36,
- UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
-};
-
-/* UPIU Read/Write flags */
-enum {
- UPIU_CMD_FLAGS_NONE = 0x00,
- UPIU_CMD_FLAGS_WRITE = 0x20,
- UPIU_CMD_FLAGS_READ = 0x40,
-};
-
-/* UPIU Task Attributes */
-enum {
- UPIU_TASK_ATTR_SIMPLE = 0x00,
- UPIU_TASK_ATTR_ORDERED = 0x01,
- UPIU_TASK_ATTR_HEADQ = 0x02,
- UPIU_TASK_ATTR_ACA = 0x03,
-};
-
-/* UPIU Query request function */
-enum {
- UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
- UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
-};
-
-/* Flag idn for Query Requests*/
-enum flag_idn {
- QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
- QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
- QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
- QUERY_FLAG_IDN_BKOPS_EN = 0x04,
- QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
- QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
- QUERY_FLAG_IDN_RESERVED2 = 0x07,
- QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
- QUERY_FLAG_IDN_BUSY_RTC = 0x09,
- QUERY_FLAG_IDN_RESERVED3 = 0x0A,
- QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
- QUERY_FLAG_IDN_WB_EN = 0x0E,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
- QUERY_FLAG_IDN_HPB_RESET = 0x11,
- QUERY_FLAG_IDN_HPB_EN = 0x12,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
- QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
- QUERY_ATTR_IDN_POWER_MODE = 0x02,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
- QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
- QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
- QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
- QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
- QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
- QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
- QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
- QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
- QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
- QUERY_ATTR_IDN_EE_STATUS = 0x0E,
- QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
- QUERY_ATTR_IDN_CNTX_CONF = 0x10,
- QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
- QUERY_ATTR_IDN_RESERVED2 = 0x12,
- QUERY_ATTR_IDN_RESERVED3 = 0x13,
- QUERY_ATTR_IDN_FFU_STATUS = 0x14,
- QUERY_ATTR_IDN_PSA_STATE = 0x15,
- QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
- QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
- QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
- QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
- QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
- QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
- QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
- QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
- QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURATION = 0x1,
- QUERY_DESC_IDN_UNIT = 0x2,
- QUERY_DESC_IDN_RFU_0 = 0x3,
- QUERY_DESC_IDN_INTERCONNECT = 0x4,
- QUERY_DESC_IDN_STRING = 0x5,
- QUERY_DESC_IDN_RFU_1 = 0x6,
- QUERY_DESC_IDN_GEOMETRY = 0x7,
- QUERY_DESC_IDN_POWER = 0x8,
- QUERY_DESC_IDN_HEALTH = 0x9,
- QUERY_DESC_IDN_MAX,
-};
-
-enum desc_header_offset {
- QUERY_DESC_LENGTH_OFFSET = 0x00,
- QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
-};
-
-/* Unit descriptor parameters offsets in bytes*/
-enum unit_desc_param {
- UNIT_DESC_PARAM_LEN = 0x0,
- UNIT_DESC_PARAM_TYPE = 0x1,
- UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
- UNIT_DESC_PARAM_LU_ENABLE = 0x3,
- UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
- UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
- UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
- UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
- UNIT_DESC_PARAM_MEM_TYPE = 0x8,
- UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
- UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
- UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
- UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
- UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
- UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
- UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
- UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
- UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
- UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
- UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
- UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
-};
-
-/* Device descriptor parameters offsets in bytes*/
-enum device_desc_param {
- DEVICE_DESC_PARAM_LEN = 0x0,
- DEVICE_DESC_PARAM_TYPE = 0x1,
- DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
- DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
- DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
- DEVICE_DESC_PARAM_PRTCL = 0x5,
- DEVICE_DESC_PARAM_NUM_LU = 0x6,
- DEVICE_DESC_PARAM_NUM_WLU = 0x7,
- DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
- DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
- DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
- DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
- DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
- DEVICE_DESC_PARAM_SEC_LU = 0xD,
- DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
- DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
- DEVICE_DESC_PARAM_SPEC_VER = 0x10,
- DEVICE_DESC_PARAM_MANF_DATE = 0x12,
- DEVICE_DESC_PARAM_MANF_NAME = 0x14,
- DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
- DEVICE_DESC_PARAM_SN = 0x16,
- DEVICE_DESC_PARAM_OEM_ID = 0x17,
- DEVICE_DESC_PARAM_MANF_ID = 0x18,
- DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
- DEVICE_DESC_PARAM_UD_LEN = 0x1B,
- DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
- DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
- DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
- DEVICE_DESC_PARAM_FFU_TMT = 0x20,
- DEVICE_DESC_PARAM_Q_DPTH = 0x21,
- DEVICE_DESC_PARAM_DEV_VER = 0x22,
- DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
- DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
- DEVICE_DESC_PARAM_PSA_TMT = 0x29,
- DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
- DEVICE_DESC_PARAM_HPB_VER = 0x40,
- DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
- DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
- DEVICE_DESC_PARAM_WB_TYPE = 0x54,
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
-};
-
-/* Interconnect descriptor parameters offsets in bytes*/
-enum interconnect_desc_param {
- INTERCONNECT_DESC_PARAM_LEN = 0x0,
- INTERCONNECT_DESC_PARAM_TYPE = 0x1,
- INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
- INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
-};
-
-/* Geometry descriptor parameters offsets in bytes*/
-enum geometry_desc_param {
- GEOMETRY_DESC_PARAM_LEN = 0x0,
- GEOMETRY_DESC_PARAM_TYPE = 0x1,
- GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
- GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
- GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
- GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
- GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
- GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
- GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
- GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
- GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
- GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
- GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
- GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
- GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
- GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
- GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
- GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
- GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
- GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
- GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
- GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
- GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
- GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
- GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
- GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
- GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
- GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
- GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
- GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
- GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
- GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
- GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
- GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
- GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
- GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
- GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
- GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
- GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
- GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
-};
-
-/* Health descriptor parameters offsets in bytes*/
-enum health_desc_param {
- HEALTH_DESC_PARAM_LEN = 0x0,
- HEALTH_DESC_PARAM_TYPE = 0x1,
- HEALTH_DESC_PARAM_EOL_INFO = 0x2,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
-};
-
-/* WriteBooster buffer mode */
-enum {
- WB_BUF_MODE_LU_DEDICATED = 0x0,
- WB_BUF_MODE_SHARED = 0x1,
-};
-
-/*
- * Logical Unit Write Protect
- * 00h: LU not write protected
- * 01h: LU write protected when fPowerOnWPEn =1
- * 02h: LU permanently write protected when fPermanentWPEn =1
- */
-enum ufs_lu_wp_type {
- UFS_LU_NO_WP = 0x00,
- UFS_LU_POWER_ON_WP = 0x01,
- UFS_LU_PERM_WP = 0x02,
-};
-
-/* bActiveICCLevel parameter current units */
-enum {
- UFSHCD_NANO_AMP = 0,
- UFSHCD_MICRO_AMP = 1,
- UFSHCD_MILI_AMP = 2,
- UFSHCD_AMP = 3,
-};
-
-/* Possible values for dExtendedUFSFeaturesSupport */
-enum {
- UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
- UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
- UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
- UFS_DEV_HPB_SUPPORT = BIT(7),
- UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
-};
-#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
-
-#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
-
-/* Attribute bActiveICCLevel parameter bit masks definitions */
-#define ATTR_ICC_LVL_UNIT_OFFSET 14
-#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
-#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
-
-/* Power descriptor parameters offsets in bytes */
-enum power_desc_param_offset {
- PWR_DESC_LEN = 0x0,
- PWR_DESC_TYPE = 0x1,
- PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
- PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
- PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
-};
-
-/* Exception event mask values */
-enum {
- MASK_EE_STATUS = 0xFFFF,
- MASK_EE_DYNCAP_EVENT = BIT(0),
- MASK_EE_SYSPOOL_EVENT = BIT(1),
- MASK_EE_URGENT_BKOPS = BIT(2),
- MASK_EE_TOO_HIGH_TEMP = BIT(3),
- MASK_EE_TOO_LOW_TEMP = BIT(4),
- MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
- MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
-};
-#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
-
-/* Background operation status */
-enum bkops_status {
- BKOPS_STATUS_NO_OP = 0x0,
- BKOPS_STATUS_NON_CRITICAL = 0x1,
- BKOPS_STATUS_PERF_IMPACT = 0x2,
- BKOPS_STATUS_CRITICAL = 0x3,
- BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
-};
-
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
- UPIU_QUERY_OPCODE_NOP = 0x0,
- UPIU_QUERY_OPCODE_READ_DESC = 0x1,
- UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
- UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
- UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
- UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
- UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
- UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
- UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
-};
-
-/* bRefClkFreq attribute values */
-enum ufs_ref_clk_freq {
- REF_CLK_FREQ_19_2_MHZ = 0,
- REF_CLK_FREQ_26_MHZ = 1,
- REF_CLK_FREQ_38_4_MHZ = 2,
- REF_CLK_FREQ_52_MHZ = 3,
- REF_CLK_FREQ_INVAL = -1,
-};
-
-/* Query response result code */
-enum {
- QUERY_RESULT_SUCCESS = 0x00,
- QUERY_RESULT_NOT_READABLE = 0xF6,
- QUERY_RESULT_NOT_WRITEABLE = 0xF7,
- QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
- QUERY_RESULT_INVALID_LENGTH = 0xF9,
- QUERY_RESULT_INVALID_VALUE = 0xFA,
- QUERY_RESULT_INVALID_SELECTOR = 0xFB,
- QUERY_RESULT_INVALID_INDEX = 0xFC,
- QUERY_RESULT_INVALID_IDN = 0xFD,
- QUERY_RESULT_INVALID_OPCODE = 0xFE,
- QUERY_RESULT_GENERAL_FAILURE = 0xFF,
-};
-
-/* UTP Transfer Request Command Type (CT) */
-enum {
- UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
- UPIU_COMMAND_SET_TYPE_UFS = 0x1,
- UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
-};
-
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
-/* Offset of the response code in the UPIU header */
-#define UPIU_RSP_CODE_OFFSET 8
-
-enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
- MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
-};
-
-/* Task management service response */
-enum {
- UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
- UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
- UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
- UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
- UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
-};
-
-/* UFS device power modes */
-enum ufs_dev_pwr_mode {
- UFS_ACTIVE_PWR_MODE = 1,
- UFS_SLEEP_PWR_MODE = 2,
- UFS_POWERDOWN_PWR_MODE = 3,
- UFS_DEEPSLEEP_PWR_MODE = 4,
-};
-
-#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
-
-/**
- * struct utp_cmd_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @sense_data: Sense data field DW-8 to DW-12
- */
-struct utp_cmd_rsp {
- __be32 residual_transfer_count;
- __be32 reserved[4];
- __be16 sense_data_len;
- u8 sense_data[UFS_SENSE_SIZE];
-};
-
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: Recommended to read HPB region and subregion
- * @hpb_inactive_field: To be inactivated HPB region and subregion
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
-/**
- * struct utp_upiu_rsp - general upiu response structure
- * @header: UPIU header structure DW-0 to DW-2
- * @sr: fields structure for scsi command DW-3 to DW-12
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_rsp {
- struct utp_upiu_header header;
- union {
- struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
- struct utp_upiu_query qr;
- };
-};
-
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_resp - UPIU QUERY
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
-/*
- * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
- * and link is in Hibern8 state.
- */
-#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
-
-struct ufs_vreg {
- struct regulator *reg;
- const char *name;
- bool always_on;
- bool enabled;
- int max_uA;
-};
-
-struct ufs_vreg_info {
- struct ufs_vreg *vcc;
- struct ufs_vreg *vccq;
- struct ufs_vreg *vccq2;
- struct ufs_vreg *vdd_hba;
-};
-
-struct ufs_dev_info {
- bool f_power_on_wp_en;
- /* Keeps information if any of the LU is power on write protected */
- bool is_lu_power_on_wp;
- /* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u16 wmanufacturerid;
- /*UFS device Product Name */
- u8 *model;
- u16 wspecversion;
- u32 clk_gating_wait_us;
-
- /* UFS HPB related flag */
- bool hpb_enabled;
-
- /* UFS WB related flags */
- bool wb_enabled;
- bool wb_buf_flush_enabled;
- u8 wb_dedicated_lu;
- u8 wb_buffer_type;
-
- bool b_rpm_dev_flush_capable;
- u8 b_presrv_uspc_en;
-};
-
-/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
- */
-enum ufs_trace_str_t {
- UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
- UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
- UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
-};
-
-/*
- * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in include/trace/events/ufs.h for UFS command trace.
- */
-enum ufs_trace_tsf_t {
- UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
deleted file mode 100644
index bcb4f004bed5..000000000000
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _UFS_QUIRKS_H_
-#define _UFS_QUIRKS_H_
-
-/* return true if s1 is a prefix of s2 */
-#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
-
-#define UFS_ANY_VENDOR 0xFFFF
-#define UFS_ANY_MODEL "ANY_MODEL"
-
-#define UFS_VENDOR_MICRON 0x12C
-#define UFS_VENDOR_SAMSUNG 0x1CE
-#define UFS_VENDOR_SKHYNIX 0x1AD
-#define UFS_VENDOR_TOSHIBA 0x198
-#define UFS_VENDOR_WDC 0x145
-
-/**
- * ufs_dev_quirk - ufs device quirk info
- * @card: ufs card details
- * @quirk: device quirk
- */
-struct ufs_dev_quirk {
- u16 wmanufacturerid;
- const u8 *model;
- unsigned int quirk;
-};
-
-/*
- * Some vendor's UFS device sends back to back NACs for the DL data frames
- * causing the host controller to raise the DFES error status. Sometimes
- * such UFS devices send back to back NAC without waiting for new
- * retransmitted DL frame from the host and in such cases it might be possible
- * the Host UniPro goes into bad state without raising the DFES error
- * interrupt. If this happens then all the pending commands would timeout
- * only after respective SW command (which is generally too large).
- *
- * We can workaround such device behaviour like this:
- * - As soon as SW sees the DL NAC error, it should schedule the error handler
- * - Error handler would sleep for 50ms to see if there are any fatal errors
- * raised by UFS controller.
- * - If there are fatal errors then SW does normal error recovery.
- * - If there are no fatal errors then SW sends the NOP command to device
- * to check if link is alive.
- * - If NOP command times out, SW does normal error recovery
- * - If NOP command succeed, skip the error handling.
- *
- * If DL NAC error is seen multiple times with some vendor's UFS devices then
- * enable this quirk to initiate quick error recovery and also silence related
- * error logs to reduce spamming of kernel logs.
- */
-#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
-
-/*
- * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
- * 600us which may not be enough for reliable hibern8 exit hardware sequence
- * from UFS device.
- * To workaround this issue, host should set its PA_TACTIVATE time to 1ms even
- * if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
- */
-#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
-
-/*
- * It seems some UFS devices may keep drawing more than sleep current
- * (atleast for 500us) from UFS rails (especially from VCCQ rail).
- * To avoid this situation, add 2ms delay before putting these UFS
- * rails in LPM mode.
- */
-#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
-
-/*
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE, enabling this quirk ensure this.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
-
-/*
- * The max. value PA_SaveConfigTime is 250 (10us) but this is not enough for
- * some vendors.
- * Gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
- * Gear switch can be issued by host controller as an error recovery and any
- * software delay will not help on this case so we need to increase
- * PA_SaveConfigTime to >32us as per vendor recommendation.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
-
-/*
- * Some UFS devices require VS_DebugSaveConfigTime is 0x10,
- * enabling this quirk ensure this.
- */
-#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
-
-/*
- * Some pre-3.1 UFS devices can support extended features by upgrading
- * the firmware. Enable this quirk to make UFS core driver probe and enable
- * supported features on such devices.
- */
-#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
-
-/*
- * Some UFS devices require delay after VCC power rail is turned-off.
- * Enable this quirk to introduce 5ms delays after VCC power-off during
- * suspend flow.
- */
-#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-
-/*
- * Some UFS devices require L2P entry should be swapped before being sent to the
- * UFS device for HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
-#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
deleted file mode 100644
index 2b0f3441b813..000000000000
--- a/drivers/scsi/ufs/ufshcd.h
+++ /dev/null
@@ -1,1230 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFSHCD_H
-#define _UFSHCD_H
-
-#include <linux/bitfield.h>
-#include <linux/blk-crypto-profile.h>
-#include <linux/blk-mq.h>
-#include <linux/devfreq.h>
-#include <linux/pm_runtime.h>
-#include <scsi/scsi_device.h>
-#include "unipro.h"
-#include "ufs.h"
-#include "ufs_quirks.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-
-struct ufs_hba;
-
-enum dev_cmd_type {
- DEV_CMD_TYPE_NOP = 0x0,
- DEV_CMD_TYPE_QUERY = 0x1,
-};
-
-enum ufs_event_type {
- /* uic specific errors */
- UFS_EVT_PA_ERR = 0,
- UFS_EVT_DL_ERR,
- UFS_EVT_NL_ERR,
- UFS_EVT_TL_ERR,
- UFS_EVT_DME_ERR,
-
- /* fatal errors */
- UFS_EVT_AUTO_HIBERN8_ERR,
- UFS_EVT_FATAL_ERR,
- UFS_EVT_LINK_STARTUP_FAIL,
- UFS_EVT_RESUME_ERR,
- UFS_EVT_SUSPEND_ERR,
- UFS_EVT_WL_SUSP_ERR,
- UFS_EVT_WL_RES_ERR,
-
- /* abnormal events */
- UFS_EVT_DEV_RESET,
- UFS_EVT_HOST_RESET,
- UFS_EVT_ABORT,
-
- UFS_EVT_CNT,
-};
-
-/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @done: UIC command completion
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- struct completion done;
-};
-
-/* Used to differentiate the power management options */
-enum ufs_pm_op {
- UFS_RUNTIME_PM,
- UFS_SYSTEM_PM,
- UFS_SHUTDOWN_PM,
-};
-
-/* Host <-> Device UniPro Link state */
-enum uic_link_state {
- UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
- UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
- UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
- UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
-};
-
-#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
-#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
- UIC_LINK_HIBERN8_STATE)
-#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
- UIC_LINK_BROKEN_STATE)
-#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
-#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
- UIC_LINK_HIBERN8_STATE)
-#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
- UIC_LINK_BROKEN_STATE)
-
-#define ufshcd_set_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
-#define ufshcd_set_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
-#define ufshcd_set_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_set_ufs_dev_deepsleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
-#define ufshcd_is_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_is_ufs_dev_deepsleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
-
-/*
- * UFS Power management levels.
- * Each level is in increasing order of power savings, except DeepSleep
- * which is lower than PowerDown with power on but not PowerDown with
- * power off.
- */
-enum ufs_pm_level {
- UFS_PM_LVL_0,
- UFS_PM_LVL_1,
- UFS_PM_LVL_2,
- UFS_PM_LVL_3,
- UFS_PM_LVL_4,
- UFS_PM_LVL_5,
- UFS_PM_LVL_6,
- UFS_PM_LVL_MAX
-};
-
-struct ufs_pm_lvl_states {
- enum ufs_dev_pwr_mode dev_state;
- enum uic_link_state link_state;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_req_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @utrd_dma_addr: UTRD dma address for debug
- * @ucd_prdt_dma_addr: PRDT dma address for debug
- * @ucd_rsp_dma_addr: UPIU response dma address for debug
- * @ucd_req_dma_addr: UPIU request dma address for debug
- * @cmd: pointer to SCSI command
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
- * @issue_time_stamp: time stamp for debug purposes
- * @compl_time_stamp: time stamp for statistics
- * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
- * @data_unit_num: the data unit number for the first block for inline crypto
- * @req_abort_skip: skip request abort task flag
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_req *ucd_req_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- dma_addr_t utrd_dma_addr;
- dma_addr_t ucd_req_dma_addr;
- dma_addr_t ucd_rsp_dma_addr;
- dma_addr_t ucd_prdt_dma_addr;
-
- struct scsi_cmnd *cmd;
- int scsi_status;
-
- int command_type;
- int task_tag;
- u8 lun; /* UPIU LUN id field is only 8-bit wide */
- bool intr_cmd;
- ktime_t issue_time_stamp;
- ktime_t compl_time_stamp;
-#ifdef CONFIG_SCSI_UFS_CRYPTO
- int crypto_key_slot;
- u64 data_unit_num;
-#endif
-
- bool req_abort_skip;
-};
-
-/**
- * struct ufs_query - holds relevant data structures for query request
- * @request: request upiu and function
- * @descriptor: buffer for sending/receiving descriptor
- * @response: response upiu and response
- */
-struct ufs_query {
- struct ufs_query_req request;
- u8 *descriptor;
- struct ufs_query_res response;
-};
-
-/**
- * struct ufs_dev_cmd - all assosiated fields with device management commands
- * @type: device management command type - Query, NOP OUT
- * @lock: lock to allow one command at a time
- * @complete: internal commands completion
- * @query: Device management query information
- */
-struct ufs_dev_cmd {
- enum dev_cmd_type type;
- struct mutex lock;
- struct completion *complete;
- struct ufs_query query;
-};
-
-/**
- * struct ufs_clk_info - UFS clock related info
- * @list: list headed by hba->clk_list_head
- * @clk: clock node
- * @name: clock name
- * @max_freq: maximum frequency supported by the clock
- * @min_freq: min frequency that can be used for clock scaling
- * @curr_freq: indicates the current frequency that it is set to
- * @keep_link_active: indicates that the clk should not be disabled if
- * link is active
- * @enabled: variable to check against multiple enable/disable
- */
-struct ufs_clk_info {
- struct list_head list;
- struct clk *clk;
- const char *name;
- u32 max_freq;
- u32 min_freq;
- u32 curr_freq;
- bool keep_link_active;
- bool enabled;
-};
-
-enum ufs_notify_change_status {
- PRE_CHANGE,
- POST_CHANGE,
-};
-
-struct ufs_pa_layer_attr {
- u32 gear_rx;
- u32 gear_tx;
- u32 lane_rx;
- u32 lane_tx;
- u32 pwr_rx;
- u32 pwr_tx;
- u32 hs_rate;
-};
-
-struct ufs_pwr_mode_info {
- bool is_valid;
- struct ufs_pa_layer_attr info;
-};
-
-/**
- * struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
- * @init: called when the driver is initialized
- * @exit: called to cleanup everything done in init
- * @get_ufs_hci_version: called to get UFS HCI version
- * @clk_scale_notify: notifies that clks are scaled up/down
- * @setup_clocks: called before touching any of the controller registers
- * @hce_enable_notify: called before and after HCE enable bit is set to allow
- * variant specific Uni-Pro initialization.
- * @link_startup_notify: called before and after Link startup is carried out
- * to allow variant specific Uni-Pro initialization.
- * @pwr_change_notify: called before and after a power mode change
- * is carried out to allow vendor spesific capabilities
- * to be set.
- * @setup_xfer_req: called before any transfer request is issued
- * to set some things
- * @setup_task_mgmt: called before any task management request is issued
- * to set some things
- * @hibern8_notify: called around hibern8 enter/exit
- * @apply_dev_quirks: called to apply device specific quirks
- * @fixup_dev_quirks: called to modify device specific quirks
- * @suspend: called during host controller PM callback
- * @resume: called during host controller PM callback
- * @dbg_register_dump: used to dump controller debug information
- * @phy_initialization: used to initialize phys
- * @device_reset: called to issue a reset pulse on the UFS device
- * @config_scaling_param: called to configure clock scaling parameters
- * @program_key: program or evict an inline encryption key
- * @event_notify: called to notify important events
- */
-struct ufs_hba_variant_ops {
- const char *name;
- int (*init)(struct ufs_hba *);
- void (*exit)(struct ufs_hba *);
- u32 (*get_ufs_hci_version)(struct ufs_hba *);
- int (*clk_scale_notify)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*hce_enable_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*link_startup_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *,
- struct ufs_pa_layer_attr *);
- void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
- bool is_scsi_cmd);
- void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
- void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *hba);
- void (*fixup_dev_quirks)(struct ufs_hba *hba);
- int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
- enum ufs_notify_change_status);
- int (*resume)(struct ufs_hba *, enum ufs_pm_op);
- void (*dbg_register_dump)(struct ufs_hba *hba);
- int (*phy_initialization)(struct ufs_hba *);
- int (*device_reset)(struct ufs_hba *hba);
- void (*config_scaling_param)(struct ufs_hba *hba,
- struct devfreq_dev_profile *profile,
- struct devfreq_simple_ondemand_data *data);
- int (*program_key)(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
- void (*event_notify)(struct ufs_hba *hba,
- enum ufs_event_type evt, void *data);
-};
-
-/* clock gating state */
-enum clk_gating_state {
- CLKS_OFF,
- CLKS_ON,
- REQ_CLKS_OFF,
- REQ_CLKS_ON,
-};
-
-/**
- * struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
- * @ungate_work: worker to turn on clocks that will be used in case of
- * interrupt context
- * @state: the current clocks state
- * @delay_ms: gating delay in ms
- * @is_suspended: clk gating is suspended when set to 1 which can be used
- * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_attr
- * @enable_attr: sysfs attribute to enable/disable clock gating
- * @is_enabled: Indicates the current status of clock gating
- * @is_initialized: Indicates whether clock gating is initialized or not
- * @active_reqs: number of requests that are pending and should be waited for
- * completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
- */
-struct ufs_clk_gating {
- struct delayed_work gate_work;
- struct work_struct ungate_work;
- enum clk_gating_state state;
- unsigned long delay_ms;
- bool is_suspended;
- struct device_attribute delay_attr;
- struct device_attribute enable_attr;
- bool is_enabled;
- bool is_initialized;
- int active_reqs;
- struct workqueue_struct *clk_gating_workq;
-};
-
-struct ufs_saved_pwr_info {
- struct ufs_pa_layer_attr info;
- bool is_valid;
-};
-
-/**
- * struct ufs_clk_scaling - UFS clock scaling related data
- * @active_reqs: number of requests that are pending. If this is zero when
- * devfreq ->target() function is called then schedule "suspend_work" to
- * suspend devfreq.
- * @tot_busy_t: Total busy time in current polling window
- * @window_start_t: Start time (in jiffies) of the current polling window
- * @busy_start_t: Start time of current busy period
- * @enable_attr: sysfs attribute to enable/disable clock scaling
- * @saved_pwr_info: UFS power mode may also be changed during scaling and this
- * one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
- * @min_gear: lowest HS gear to scale down to
- * @is_enabled: tracks if scaling is currently enabled or not, controlled by
- * clkscale_enable sysfs node
- * @is_allowed: tracks if scaling is currently allowed or not, used to block
- * clock scaling which is not invoked from devfreq governor
- * @is_initialized: Indicates whether clock scaling is initialized or not
- * @is_busy_started: tracks if busy period has started or not
- * @is_suspended: tracks if devfreq is suspended or not
- */
-struct ufs_clk_scaling {
- int active_reqs;
- unsigned long tot_busy_t;
- ktime_t window_start_t;
- ktime_t busy_start_t;
- struct device_attribute enable_attr;
- struct ufs_saved_pwr_info saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
- u32 min_gear;
- bool is_enabled;
- bool is_allowed;
- bool is_initialized;
- bool is_busy_started;
- bool is_suspended;
-};
-
-#define UFS_EVENT_HIST_LENGTH 8
-/**
- * struct ufs_event_hist - keeps history of errors
- * @pos: index to indicate cyclic buffer position
- * @val: cyclic buffer for registers value
- * @tstamp: cyclic buffer for time stamp
- * @cnt: error counter
- */
-struct ufs_event_hist {
- int pos;
- u32 val[UFS_EVENT_HIST_LENGTH];
- ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
- unsigned long long cnt;
-};
-
-/**
- * struct ufs_stats - keeps usage/err statistics
- * @last_intr_status: record the last interrupt status.
- * @last_intr_ts: record the last interrupt timestamp.
- * @hibern8_exit_cnt: Counter to keep track of number of exits,
- * reset this after link-startup.
- * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
- * Clear after the first successful command completion.
- * @event: array with event history.
- */
-struct ufs_stats {
- u32 last_intr_status;
- ktime_t last_intr_ts;
-
- u32 hibern8_exit_cnt;
- ktime_t last_hibern8_exit_tstamp;
- struct ufs_event_hist event[UFS_EVT_CNT];
-};
-
-/**
- * enum ufshcd_state - UFS host controller state
- * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
- * processing.
- * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
- * SCSI commands.
- * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
- * SCSI commands may be submitted to the controller.
- * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
- * newly submitted SCSI commands with error code DID_BAD_TARGET.
- * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
- * failed. Fail all SCSI commands with error code DID_ERROR.
- */
-enum ufshcd_state {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
- UFSHCD_STATE_EH_SCHEDULED_FATAL,
- UFSHCD_STATE_ERROR,
-};
-
-enum ufshcd_quirks {
- /* Interrupt aggregation support is broken */
- UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
-
- /*
- * delay before each dme command is required as the unipro
- * layer has shown instabilities
- */
- UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
-
- /*
- * If UFS host controller is having issue in processing LCC (Line
- * Control Command) coming from device then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
- * attribute of device to 0).
- */
- UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
-
- /*
- * The attribute PA_RXHSUNTERMCAP specifies whether or not the
- * inbound Link supports unterminated line in HS mode. Setting this
- * attribute to 1 fixes moving to HS gear.
- */
- UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
-
- /*
- * This quirk needs to be enabled if the host controller only allows
- * accessing the peer dme attributes in AUTO mode (FAST AUTO or
- * SLOW AUTO).
- */
- UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
-
- /*
- * This quirk needs to be enabled if the host controller doesn't
- * advertise the correct version in UFS_VER register. If this quirk
- * is enabled, standard UFS host driver will call the vendor specific
- * ops (get_ufs_hci_version) to get the correct version.
- */
- UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
-
- /*
- * Clear handling for transfer/task request list is just opposite.
- */
- UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
-
- /*
- * This quirk needs to be enabled if host controller doesn't allow
- * that the interrupt aggregation timer and counter are reset by s/w.
- */
- UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
-
- /*
- * This quirks needs to be enabled if host controller cannot be
- * enabled via HCE register.
- */
- UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
-
- /*
- * This quirk needs to be enabled if the host controller regards
- * resolution of the values of PRDTO and PRDTL in UTRD as byte.
- */
- UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
-
- /*
- * This quirk needs to be enabled if the host controller reports
- * OCS FATAL ERROR with device error through sense data
- */
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
-
- /*
- * This quirk needs to be enabled if the host controller has
- * auto-hibernate capability but it doesn't work.
- */
- UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
-
- /*
- * This quirk needs to disable manual flush for write booster
- */
- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
-
- /*
- * This quirk needs to disable unipro timeout values
- * before power mode change
- */
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
-
- /*
- * This quirk allows only sg entries aligned with page size.
- */
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
-
- /*
- * This quirk needs to be enabled if the host controller does not
- * support UIC command
- */
- UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
-
- /*
- * This quirk needs to be enabled if the host controller cannot
- * support physical host configuration.
- */
- UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
-};
-
-enum ufshcd_caps {
- /* Allow dynamic clk gating */
- UFSHCD_CAP_CLK_GATING = 1 << 0,
-
- /* Allow hiberb8 with clk gating */
- UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
-
- /* Allow dynamic clk scaling */
- UFSHCD_CAP_CLK_SCALING = 1 << 2,
-
- /* Allow auto bkops to be enabled during runtime suspend */
- UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
-
- /*
- * This capability allows host controller driver to use the UFS HCI's
- * interrupt aggregation capability.
- * CAUTION: Enabling this might reduce overall UFS throughput.
- */
- UFSHCD_CAP_INTR_AGGR = 1 << 4,
-
- /*
- * This capability allows the device auto-bkops to be always enabled
- * except during suspend (both runtime and system suspend).
- * Enabling this capability means that the device will always be
- * allowed to do background operations while it is active, but this
- * might degrade the performance of ongoing read/write operations.
- */
- UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
-
- /*
- * This capability allows host controller driver to automatically
- * enable runtime power management by itself instead of waiting
- * for userspace to control the power management.
- */
- UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
-
- /*
- * This capability allows the host controller driver to turn on
- * WriteBooster, if the underlying device supports it and is
- * provisioned to be used. This increases write performance.
- */
- UFSHCD_CAP_WB_EN = 1 << 7,
-
- /*
- * This capability allows the host controller driver to use the
- * inline crypto engine, if it is present
- */
- UFSHCD_CAP_CRYPTO = 1 << 8,
-
- /*
- * This capability allows the controller regulators to be put into
- * low power mode aggressively during clock gating.
- * This would increase power savings.
- */
- UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
-
- /*
- * This capability allows the host controller driver to use DeepSleep,
- * if it is supported by the UFS device. The host controller driver must
- * support device hardware reset via the hba->device_reset() callback,
- * in order to exit DeepSleep state.
- */
- UFSHCD_CAP_DEEPSLEEP = 1 << 10,
-
- /*
- * This capability allows the host controller driver to use temperature
- * notification if it is supported by the UFS device.
- */
- UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
-};
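These quirk and capability bits are meant to be OR-ed into hba->quirks and hba->caps by the platform glue driver, typically from its ->init() variant op. A minimal sketch of such an op (the function name and the chosen bits are illustrative, not taken from any real vendor driver):

	static int example_hba_init(struct ufs_hba *hba)
	{
		/* Opt in to optional features this controller handles well. */
		hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_WB_EN;

		/* Flag known controller errata so the core works around them. */
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS |
			       UFSHCD_QUIRK_BROKEN_LCC;

		return 0;
	}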
-
-struct ufs_hba_variant_params {
- struct devfreq_dev_profile devfreq_profile;
- struct devfreq_simple_ondemand_data ondemand_data;
- u16 hba_enable_delay_us;
- u32 wb_flush_threshold;
-};
-
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical units, used to check whether all
- *	LUs have finished initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter used to check whether all LUs have finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
-struct ufs_hba_monitor {
- unsigned long chunk_size;
-
- unsigned long nr_sec_rw[2];
- ktime_t total_busy[2];
-
- unsigned long nr_req[2];
- /* latencies */
- ktime_t lat_sum[2];
- ktime_t lat_max[2];
- ktime_t lat_min[2];
-
- u32 nr_queued[2];
- ktime_t busy_start_ts[2];
-
- ktime_t enabled_ts;
- bool enabled;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @dev: device handle
- * @ufs_device_wlun: WLUN that controls the entire UFS device.
- * @hwmon_device: device instance registered with the hwmon core.
- * @curr_dev_pwr_mode: active UFS device power mode.
- * @uic_link_state: active state of the link to the UFS device.
- * @rpm_lvl: desired UFS power management level during runtime PM.
- * @spm_lvl: desired UFS power management level during system PM.
- * @pm_op_in_progress: whether or not a PM operation is in progress.
- * @ahit: value of Auto-Hibernate Idle Timer register.
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_lock: Protects @outstanding_reqs.
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
- * @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
- * @vps: pointer to variant specific parameters
- * @priv: pointer to variant specific private data
- * @irq: Irq number of the controller
- * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
- * @dev_ref_clk_freq: reference clock frequency
- * @quirks: bitmask with information about deviations from the UFSHCI standard.
- * @dev_quirks: bitmask with information about deviations from the UFS standard.
- * @tmf_tag_set: TMF tag set.
- * @tmf_queue: Used to allocate TMF tags.
- * @tmf_rqs: array with pointers to TMF requests while these are in progress.
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for UIC command
- * @uic_async_done: completion used during UIC processing
- * @ufshcd_state: UFSHCD state
- * @eh_flags: Error handling flags
- * @intr_mask: Interrupt Mask Bits
- * @ee_ctrl_mask: Exception event control mask
- * @ee_drv_mask: Exception event mask for driver
- * @ee_usr_mask: Exception event mask for user (set via debugfs)
- * @ee_ctrl_mutex: Used to serialize exception event information.
- * @is_powered: flag to check if HBA is powered
- * @shutting_down: flag to check if shutdown has been invoked
- * @host_sem: semaphore used to serialize concurrent contexts
- * @eh_wq: Workqueue that eh_work works on
- * @eh_work: Worker to handle UFS errors that require s/w attention
- * @eeh_work: Worker to handle exception events
- * @errors: HBA errors
- * @uic_error: UFS interconnect layer error status
- * @saved_err: sticky error mask
- * @saved_uic_err: sticky UIC error mask
- * @ufs_stats: various error counters
- * @force_reset: flag to force eh_work perform a full reset
- * @force_pmc: flag to force a power mode change
- * @silence_err_logs: flag to silence error logs
- * @dev_cmd: ufs device management command information
- * @last_dme_cmd_tstamp: time stamp of the last completed DME command
- * @nop_out_timeout: NOP OUT timeout value
- * @dev_info: information about the UFS device
- * @auto_bkops_enabled: to track whether bkops is enabled in device
- * @vreg_info: UFS device voltage regulator information
- * @clk_list_head: UFS host controller clocks list node head
- * @req_abort_count: number of times ufshcd_abort() has been called
- * @lanes_per_direction: number of lanes per data direction between the UFS
- * controller and the UFS device.
- * @pwr_info: holds current power mode
- * @max_pwr_info: keeps the maximum valid power mode supported by the device
- * @clk_gating: information related to clock gating
- * @caps: bitmask with information about UFS controller capabilities
- * @devfreq: frequency scaling information owned by the devfreq core
- * @clk_scaling: frequency scaling information owned by the UFS driver
- * @is_sys_suspended: whether or not the entire system has been suspended
- * @urgent_bkops_lvl: keeps track of urgent bkops level for device
- * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
- * device is known or not.
- * @clk_scaling_lock: used to serialize device commands and clock scaling
- * @desc_size: descriptor sizes reported by device
- * @scsi_block_reqs_cnt: reference counting for scsi block requests
- * @bsg_dev: struct device associated with the BSG queue
- * @bsg_queue: BSG queue associated with the UFS controller
- * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
- * management) after the UFS device has finished a WriteBooster buffer
- * flush or auto BKOP.
- * @ufshpb_dev: information related to HPB (Host Performance Booster).
- * @monitor: statistics about UFS commands
- * @crypto_capabilities: Content of crypto capabilities register (0x100)
- * @crypto_cap_array: Array of crypto capabilities
- * @crypto_cfg_register: Start of the crypto cfg array
- * @crypto_profile: the crypto profile of this hba (if applicable)
- * @debugfs_root: UFS controller debugfs root directory
- * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
- * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
- * ee_ctrl_mask
- * @luns_avail: number of regular and well known LUNs supported by the UFS
- * device
- * @complete_put: whether or not to call ufshcd_rpm_put() from inside
- * ufshcd_resume_complete()
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct device *dev;
- struct scsi_device *ufs_device_wlun;
-
-#ifdef CONFIG_SCSI_UFS_HWMON
- struct device *hwmon_device;
-#endif
-
- enum ufs_dev_pwr_mode curr_dev_pwr_mode;
- enum uic_link_state uic_link_state;
- /* Desired UFS power management level during runtime PM */
- enum ufs_pm_level rpm_lvl;
- /* Desired UFS power management level during system PM */
- enum ufs_pm_level spm_lvl;
- int pm_op_in_progress;
-
- /* Auto-Hibernate Idle Timer register value */
- u32 ahit;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- spinlock_t outstanding_lock;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 reserved_slot;
- u32 ufs_version;
- const struct ufs_hba_variant_ops *vops;
- struct ufs_hba_variant_params *vps;
- void *priv;
- unsigned int irq;
- bool is_irq_enabled;
- enum ufs_ref_clk_freq dev_ref_clk_freq;
-
- unsigned int quirks; /* Deviations from standard UFSHCI spec. */
-
- /* Device deviations from standard UFS device spec. */
- unsigned int dev_quirks;
-
- struct blk_mq_tag_set tmf_tag_set;
- struct request_queue *tmf_queue;
- struct request **tmf_rqs;
-
- struct uic_command *active_uic_cmd;
- struct mutex uic_cmd_mutex;
- struct completion *uic_async_done;
-
- enum ufshcd_state ufshcd_state;
- u32 eh_flags;
- u32 intr_mask;
- u16 ee_ctrl_mask;
- u16 ee_drv_mask;
- u16 ee_usr_mask;
- struct mutex ee_ctrl_mutex;
- bool is_powered;
- bool shutting_down;
- struct semaphore host_sem;
-
- /* Work Queues */
- struct workqueue_struct *eh_wq;
- struct work_struct eh_work;
- struct work_struct eeh_work;
-
- /* HBA Errors */
- u32 errors;
- u32 uic_error;
- u32 saved_err;
- u32 saved_uic_err;
- struct ufs_stats ufs_stats;
- bool force_reset;
- bool force_pmc;
- bool silence_err_logs;
-
- /* Device management request data */
- struct ufs_dev_cmd dev_cmd;
- ktime_t last_dme_cmd_tstamp;
- int nop_out_timeout;
-
- /* Keeps information of the UFS device connected to this host */
- struct ufs_dev_info dev_info;
- bool auto_bkops_enabled;
- struct ufs_vreg_info vreg_info;
- struct list_head clk_list_head;
-
- /* Number of requests aborts */
- int req_abort_count;
-
- /* Number of lanes available (1 or 2) for Rx/Tx */
- u32 lanes_per_direction;
- struct ufs_pa_layer_attr pwr_info;
- struct ufs_pwr_mode_info max_pwr_info;
-
- struct ufs_clk_gating clk_gating;
- /* Control to enable/disable host capabilities */
- u32 caps;
-
- struct devfreq *devfreq;
- struct ufs_clk_scaling clk_scaling;
- bool is_sys_suspended;
-
- enum bkops_status urgent_bkops_lvl;
- bool is_urgent_bkops_lvl_checked;
-
- struct rw_semaphore clk_scaling_lock;
- unsigned char desc_size[QUERY_DESC_IDN_MAX];
- atomic_t scsi_block_reqs_cnt;
-
- struct device bsg_dev;
- struct request_queue *bsg_queue;
- struct delayed_work rpm_dev_flush_recheck_work;
-
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
- struct ufs_hba_monitor monitor;
-
-#ifdef CONFIG_SCSI_UFS_CRYPTO
- union ufs_crypto_capabilities crypto_capabilities;
- union ufs_crypto_cap_entry *crypto_cap_array;
- u32 crypto_cfg_register;
- struct blk_crypto_profile crypto_profile;
-#endif
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_root;
- struct delayed_work debugfs_ee_work;
- u32 debugfs_ee_rate_limit_ms;
-#endif
- u32 luns_avail;
- bool complete_put;
-};
-
-/* Returns true if clocks can be gated. Otherwise false */
-static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_GATING;
-}
-static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
-}
-static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_SCALING;
-}
-static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-}
-static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
-}
-
-static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
-{
- return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
-}
-
-static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
-{
- return !!(ufshcd_is_link_hibern8(hba) &&
- (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
-}
-
-static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
-{
- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
-}
-
-static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
-{
- return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
-}
-
-static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_WB_EN;
-}
-
-#define ufshcd_writel(hba, val, reg) \
- writel((val), (hba)->mmio_base + (reg))
-#define ufshcd_readl(hba, reg) \
- readl((hba)->mmio_base + (reg))
-
-/**
- * ufshcd_rmwl - perform read/modify/write for a controller register
- * @hba: per adapter instance
- * @mask: mask to apply on read value
- * @val: actual value to write
- * @reg: register address
- */
-static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
-{
- u32 tmp;
-
- tmp = ufshcd_readl(hba, reg);
- tmp &= ~mask;
- tmp |= (val & mask);
- ufshcd_writel(hba, tmp, reg);
-}
-
-int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
-int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
-int ufshcd_link_recovery(struct ufs_hba *hba);
-int ufshcd_make_hba_operational(struct ufs_hba *hba);
-void ufshcd_remove(struct ufs_hba *);
-int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
-int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
-void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
-void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
-void ufshcd_hba_stop(struct ufs_hba *hba);
-void ufshcd_schedule_eh_work(struct ufs_hba *hba);
-
-static inline void check_upiu_size(void)
-{
- BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
- GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
-
-/**
- * ufshcd_set_variant - set variant specific data to the hba
- * @hba: per adapter instance
- * @variant: pointer to variant specific data
- */
-static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
-{
- BUG_ON(!hba);
- hba->priv = variant;
-}
-
-/**
- * ufshcd_get_variant - get variant specific data from the hba
- * @hba: per adapter instance
- */
-static inline void *ufshcd_get_variant(struct ufs_hba *hba)
-{
- BUG_ON(!hba);
- return hba->priv;
-}
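ufshcd_set_variant()/ufshcd_get_variant() are the hooks a vendor glue driver uses to hang its own per-controller state off the hba. A hypothetical sketch (the struct and field names are made up for illustration):

	struct example_ufs_host {
		bool configured;
		u32 saved_cfg;
	};

	static int example_variant_init(struct ufs_hba *hba)
	{
		struct example_ufs_host *host;

		host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
		if (!host)
			return -ENOMEM;

		ufshcd_set_variant(hba, host);
		return 0;
	}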
-
-#ifdef CONFIG_PM
-extern int ufshcd_runtime_suspend(struct device *dev);
-extern int ufshcd_runtime_resume(struct device *dev);
-#endif
-#ifdef CONFIG_PM_SLEEP
-extern int ufshcd_system_suspend(struct device *dev);
-extern int ufshcd_system_resume(struct device *dev);
-#endif
-extern int ufshcd_shutdown(struct ufs_hba *hba);
-extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
- int agreed_gear,
- int adapt_val);
-extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
- u8 attr_set, u32 mib_val, u8 peer);
-extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
- u32 *mib_val, u8 peer);
-extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
-
-/* UIC command interfaces for DME primitives */
-#define DME_LOCAL 0
-#define DME_PEER 1
-#define ATTR_SET_NOR 0 /* NORMAL */
-#define ATTR_SET_ST 1 /* STATIC */
-
-static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
-}
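The wrappers above fold the DME_LOCAL/DME_PEER target and the NORMAL/STATIC set type into ufshcd_dme_set_attr()/ufshcd_dme_get_attr(). A hedged sketch of the kind of attribute tuning a vendor quirk handler might do with them (the attribute choice and the +1 adjustment are purely illustrative):

	static int example_tune_tactivate(struct ufs_hba *hba)
	{
		u32 granularity = 0, peer_tactivate = 0;
		int ret;

		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				     &granularity);
		if (ret)
			return ret;

		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  &peer_tactivate);
		if (ret)
			return ret;

		/* Bump the device's PA_TACTIVATE by one unit. */
		return ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					   peer_tactivate + 1);
	}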
-
-static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
-{
- return (pwr_info->pwr_rx == FAST_MODE ||
- pwr_info->pwr_rx == FASTAUTO_MODE) &&
- (pwr_info->pwr_tx == FAST_MODE ||
- pwr_info->pwr_tx == FASTAUTO_MODE);
-}
-
-static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
-{
- return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-}
-
-/* Expose Query-Request API */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len);
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size);
-int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector,
- u32 *attr_val);
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, u8 index, bool *flag_res);
-
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
- const struct ufs_dev_quirk *fixups);
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
-
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
-void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
-
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
-
-int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
-
-int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
-int ufshcd_suspend_prepare(struct device *dev);
-int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
-void ufshcd_resume_complete(struct device *dev);
-
-/* Wrapper functions for safely calling variant operations */
-static inline int ufshcd_vops_init(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->init)
- return hba->vops->init(hba);
-
- return 0;
-}
-
-static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->phy_initialization)
- return hba->vops->phy_initialization(hba);
-
- return 0;
-}
-
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
-
-int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix);
-
-int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
-int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
deleted file mode 100644
index f81aa95ffbc4..000000000000
--- a/drivers/scsi/ufs/ufshci.h
+++ /dev/null
@@ -1,510 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFSHCI_H
-#define _UFSHCI_H
-
-#include <scsi/scsi_host.h>
-
-enum {
- TASK_REQ_UPIU_SIZE_DWORDS = 8,
- TASK_RSP_UPIU_SIZE_DWORDS = 8,
- ALIGNED_UPIU_SIZE = 512,
-};
-
-/* UFSHCI Registers */
-enum {
- REG_CONTROLLER_CAPABILITIES = 0x00,
- REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
- REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
- REG_INTERRUPT_STATUS = 0x20,
- REG_INTERRUPT_ENABLE = 0x24,
- REG_CONTROLLER_STATUS = 0x30,
- REG_CONTROLLER_ENABLE = 0x34,
- REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
- REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
- REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
- REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
- REG_UIC_ERROR_CODE_DME = 0x48,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
- REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
- REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
- REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
- REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
- REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
- REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
- REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
- REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
- REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
- REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
- REG_UIC_COMMAND = 0x90,
- REG_UIC_COMMAND_ARG_1 = 0x94,
- REG_UIC_COMMAND_ARG_2 = 0x98,
- REG_UIC_COMMAND_ARG_3 = 0x9C,
-
- UFSHCI_REG_SPACE_SIZE = 0xA0,
-
- REG_UFS_CCAP = 0x100,
- REG_UFS_CRYPTOCAP = 0x104,
-
- UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
-};
-
-/* Controller capability masks */
-enum {
- MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
- MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
- MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
- MASK_64_ADDRESSING_SUPPORT = 0x01000000,
- MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
- MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
- MASK_CRYPTO_SUPPORT = 0x10000000,
-};
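The two request-slot fields are stored as value-minus-one; a sketch of how the queue depths are derived from this register (an approximation of what the core driver does, using only the helpers and masks defined in these headers):

	u32 cap = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	hba->nutrs = (cap & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs = ((cap & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;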
-
-#define UFS_MASK(mask, offset) ((mask) << (offset))
-
-/* UFS Version 08h */
-#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
-#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
-
-/*
- * Controller UFSHCI version
- * - 2.x and newer use the following scheme:
- * (major << 8) + (minor << 4)
- * - 1.x has been converted to match this in
- * ufshcd_get_ufs_version()
- */
-static inline u32 ufshci_version(u32 major, u32 minor)
-{
- return (major << 8) + (minor << 4);
-}
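A sketch of the kind of comparison this helper enables, here selecting an interrupt mask for older controller revisions (the masks are defined further down in this header; the exact policy lives in ufshcd.c and this is only an approximation):

	u32 intr_mask;

	if (hba->ufs_version <= ufshci_version(1, 0))
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
	else if (hba->ufs_version <= ufshci_version(1, 1))
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
	else
		intr_mask = INTERRUPT_MASK_ALL_VER_21;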
-
-/*
- * HCDDID - Host Controller Identification Descriptor
- * - Device ID and Device Class 10h
- */
-#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
-#define DEVICE_ID UFS_MASK(0xFF, 24)
-
-/*
- * HCPMID - Host Controller Identification Descriptor
- * - Product/Manufacturer ID 14h
- */
-#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
-#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-
-/* AHIT - Auto-Hibernate Idle Timer */
-#define UFSHCI_AHIBERN8_TIMER_MASK GENMASK(9, 0)
-#define UFSHCI_AHIBERN8_SCALE_MASK GENMASK(12, 10)
-#define UFSHCI_AHIBERN8_SCALE_FACTOR 10
-#define UFSHCI_AHIBERN8_MAX (1023 * 100000)
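AHIT packs a 10-bit timer value and a 3-bit scale field. Assuming <linux/bitfield.h> is available (FIELD_GET is already used against these masks in ufshcd.h) and assuming the spec's scale encoding in which 3 selects 1 ms units, a 150 ms idle time could be built roughly like this:

	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufshcd_auto_hibern8_update(hba, ahit);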
-
-/*
- * IS - Interrupt Status - 20h
- */
-#define UTP_TRANSFER_REQ_COMPL 0x1
-#define UIC_DME_END_PT_RESET 0x2
-#define UIC_ERROR 0x4
-#define UIC_TEST_MODE 0x8
-#define UIC_POWER_MODE 0x10
-#define UIC_HIBERNATE_EXIT 0x20
-#define UIC_HIBERNATE_ENTER 0x40
-#define UIC_LINK_LOST 0x80
-#define UIC_LINK_STARTUP 0x100
-#define UTP_TASK_REQ_COMPL 0x200
-#define UIC_COMMAND_COMPL 0x400
-#define DEVICE_FATAL_ERROR 0x800
-#define CONTROLLER_FATAL_ERROR 0x10000
-#define SYSTEM_BUS_FATAL_ERROR 0x20000
-#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
-
-#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
- UIC_HIBERNATE_EXIT)
-
-#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\
- UIC_POWER_MODE)
-
-#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
-
-#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR |\
- UIC_LINK_LOST)
-
-/* HCS - Host Controller Status 30h */
-#define DEVICE_PRESENT 0x1
-#define UTP_TRANSFER_REQ_LIST_READY 0x2
-#define UTP_TASK_REQ_LIST_READY 0x4
-#define UIC_COMMAND_READY 0x8
-#define HOST_ERROR_INDICATOR 0x10
-#define DEVICE_ERROR_INDICATOR 0x20
-#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
-
-#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
- UTP_TASK_REQ_LIST_READY |\
- UIC_COMMAND_READY)
-
-enum {
- PWR_OK = 0x0,
- PWR_LOCAL = 0x01,
- PWR_REMOTE = 0x02,
- PWR_BUSY = 0x03,
- PWR_ERROR_CAP = 0x04,
- PWR_FATAL_ERROR = 0x05,
-};
-
-/* HCE - Host Controller Enable 34h */
-#define CONTROLLER_ENABLE 0x1
-#define CONTROLLER_DISABLE 0x0
-#define CRYPTO_GENERAL_ENABLE 0x2
-
-/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
-#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
-#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
-#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
-#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
-
-/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
-#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
-#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
-#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
-#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
-#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
-#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
-#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
-#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
-
-/* UECN - Host UIC Error Code Network Layer 40h */
-#define UIC_NETWORK_LAYER_ERROR 0x80000000
-#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
-#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
-#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
-
-/* UECT - Host UIC Error Code Transport Layer 44h */
-#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
-#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
-#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
-#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
-#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
-#define UIC_TRANSPORT_BAD_TC 0x10
-#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
-#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
-
-/* UECDME - Host UIC Error Code DME 48h */
-#define UIC_DME_ERROR 0x80000000
-#define UIC_DME_ERROR_CODE_MASK 0x1
-
-/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
-#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
-#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
-#define INT_AGGR_STATUS_BIT 0x100000
-#define INT_AGGR_PARAM_WRITE 0x1000000
-#define INT_AGGR_ENABLE 0x80000000
-
-/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
-#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
-#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UICCMD - UIC Command */
-#define COMMAND_OPCODE_MASK 0xFF
-#define GEN_SELECTOR_INDEX_MASK 0xFFFF
-
-#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
-#define RESET_LEVEL 0xFF
-
-#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
-#define CONFIG_RESULT_CODE_MASK 0xFF
-#define GENERIC_ERROR_CODE_MASK 0xFF
-
-/* GenSelectorIndex calculation macros for M-PHY attributes */
-#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
-#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
-
-#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
- ((sel) & 0xFFFF))
-#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
-#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
-#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
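The GenSelectorIndex macros select which M-PHY lane a DME access targets. A hedged example of reading a per-lane M-RX capability from the peer (the attribute and lane are arbitrary choices for illustration):

	u32 min_activate_time = 0;
	int ret;

	ret = ufshcd_dme_peer_get(hba,
			UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
			&min_activate_time);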
-
-/* Link Status*/
-enum link_status {
- UFSHCD_LINK_IS_DOWN = 1,
- UFSHCD_LINK_IS_UP = 2,
-};
-
-/* UIC Commands */
-enum uic_cmd_dme {
- UIC_CMD_DME_GET = 0x01,
- UIC_CMD_DME_SET = 0x02,
- UIC_CMD_DME_PEER_GET = 0x03,
- UIC_CMD_DME_PEER_SET = 0x04,
- UIC_CMD_DME_POWERON = 0x10,
- UIC_CMD_DME_POWEROFF = 0x11,
- UIC_CMD_DME_ENABLE = 0x12,
- UIC_CMD_DME_RESET = 0x14,
- UIC_CMD_DME_END_PT_RST = 0x15,
- UIC_CMD_DME_LINK_STARTUP = 0x16,
- UIC_CMD_DME_HIBER_ENTER = 0x17,
- UIC_CMD_DME_HIBER_EXIT = 0x18,
- UIC_CMD_DME_TEST_MODE = 0x1A,
-};
-
-/* UIC Config result code / Generic error code */
-enum {
- UIC_CMD_RESULT_SUCCESS = 0x00,
- UIC_CMD_RESULT_INVALID_ATTR = 0x01,
- UIC_CMD_RESULT_FAILURE = 0x01,
- UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
- UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
- UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
- UIC_CMD_RESULT_BAD_INDEX = 0x05,
- UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
- UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
- UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
- UIC_CMD_RESULT_BUSY = 0x09,
- UIC_CMD_RESULT_DME_FAILURE = 0x0A,
-};
-
-#define MASK_UIC_COMMAND_RESULT 0xFF
-
-#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
-#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
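These macros build the counter-threshold and timeout fields of UTRIACR; combined with the enable/param-write bits above, programming the register looks roughly like this (the threshold and timeout values here are arbitrary):

	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(31) |
		      INT_AGGR_TIMEOUT_VAL(40),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);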
-
-/* Interrupt disable masks */
-enum {
- /* Interrupt disable mask for UFSHCI v1.0 */
- INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
- INTERRUPT_MASK_RW_VER_10 = 0x30000,
-
- /* Interrupt disable mask for UFSHCI v1.1 */
- INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
-
- /* Interrupt disable mask for UFSHCI v2.1 */
- INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
-};
-
-/* CCAP - Crypto Capability 100h */
-union ufs_crypto_capabilities {
- __le32 reg_val;
- struct {
- u8 num_crypto_cap;
- u8 config_count;
- u8 reserved;
- u8 config_array_ptr;
- };
-};
-
-enum ufs_crypto_key_size {
- UFS_CRYPTO_KEY_SIZE_INVALID = 0x0,
- UFS_CRYPTO_KEY_SIZE_128 = 0x1,
- UFS_CRYPTO_KEY_SIZE_192 = 0x2,
- UFS_CRYPTO_KEY_SIZE_256 = 0x3,
- UFS_CRYPTO_KEY_SIZE_512 = 0x4,
-};
-
-enum ufs_crypto_alg {
- UFS_CRYPTO_ALG_AES_XTS = 0x0,
- UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
- UFS_CRYPTO_ALG_AES_ECB = 0x2,
- UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
-};
-
-/* x-CRYPTOCAP - Crypto Capability X */
-union ufs_crypto_cap_entry {
- __le32 reg_val;
- struct {
- u8 algorithm_id;
- u8 sdus_mask; /* Supported data unit size mask */
- u8 key_size;
- u8 reserved;
- };
-};
-
-#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
-#define UFS_CRYPTO_KEY_MAX_SIZE 64
-/* x-CRYPTOCFG - Crypto Configuration X */
-union ufs_crypto_cfg_entry {
- __le32 reg_val[32];
- struct {
- u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
- u8 data_unit_size;
- u8 crypto_cap_idx;
- u8 reserved_1;
- u8 config_enable;
- u8 reserved_multi_host;
- u8 reserved_2;
- u8 vsb[2];
- u8 reserved_3[56];
- };
-};
-
-/*
- * Request Descriptor Definitions
- */
-
-/* Transfer request command type */
-enum {
- UTP_CMD_TYPE_SCSI = 0x0,
- UTP_CMD_TYPE_UFS = 0x1,
- UTP_CMD_TYPE_DEV_MANAGE = 0x2,
-};
-
-/* To accommodate UFS2.0 required Command type */
-enum {
- UTP_CMD_TYPE_UFS_STORAGE = 0x1,
-};
-
-enum {
- UTP_SCSI_COMMAND = 0x00000000,
- UTP_NATIVE_UFS_COMMAND = 0x10000000,
- UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
-};
-
-/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
-};
-
-/* Overall command status values */
-enum utp_ocs {
- OCS_SUCCESS = 0x0,
- OCS_INVALID_CMD_TABLE_ATTR = 0x1,
- OCS_INVALID_PRDT_ATTR = 0x2,
- OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
- OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
- OCS_PEER_COMM_FAILURE = 0x5,
- OCS_ABORTED = 0x6,
- OCS_FATAL_ERROR = 0x7,
- OCS_DEVICE_FATAL_ERROR = 0x8,
- OCS_INVALID_CRYPTO_CONFIG = 0x9,
- OCS_GENERAL_CRYPTO_ERROR = 0xA,
- OCS_INVALID_COMMAND_STATUS = 0x0F,
-};
-
-enum {
- MASK_OCS = 0x0F,
-};
-
-/* The maximum data size described by the data byte count field in the PRDT is 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
-/* The granularity of the data byte count field in the PRDT is 32-bit */
-#define PRDT_DATA_BYTE_COUNT_PAD 4
-
-/**
- * struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @addr: Physical address; DW-0 and DW-1.
- * @reserved: Reserved for future use DW-2
- * @size: size of physical segment DW-3
- */
-struct ufshcd_sg_entry {
- __le64 addr;
- __le32 reserved;
- __le32 size;
-};
-
-/**
- * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
- * @command_upiu: Command UPIU Frame address
- * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
- */
-struct utp_transfer_cmd_desc {
- u8 command_upiu[ALIGNED_UPIU_SIZE];
- u8 response_upiu[ALIGNED_UPIU_SIZE];
- struct ufshcd_sg_entry prd_table[SG_ALL];
-};
-
-/**
- * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword0: Descriptor Header DW0
- * @dword1: Descriptor Header DW1
- * @dword2: Descriptor Header DW2
- * @dword3: Descriptor Header DW3
- */
-struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
-
-/**
- * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
- * @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
- * @response_upiu_length: response UPIU length DW-6
- * @response_upiu_offset: response UPIU offset DW-6
- * @prd_table_length: Physical region descriptor length DW-7
- * @prd_table_offset: Physical region descriptor offset DW-7
- */
-struct utp_transfer_req_desc {
-
- /* DW 0-3 */
- struct request_desc_header header;
-
- /* DW 4-5*/
- __le32 command_desc_base_addr_lo;
- __le32 command_desc_base_addr_hi;
-
- /* DW 6 */
- __le16 response_upiu_length;
- __le16 response_upiu_offset;
-
- /* DW 7 */
- __le16 prd_table_length;
- __le16 prd_table_offset;
-};
-
-/*
- * UTMRD structure.
- */
-struct utp_task_req_desc {
- /* DW 0-3 */
- struct request_desc_header header;
-
- /* DW 4-11 - Task request UPIU structure */
- struct {
- struct utp_upiu_header req_header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 __reserved1[2];
- } upiu_req;
-
- /* DW 12-19 - Task Management Response UPIU structure */
- struct {
- struct utp_upiu_header rsp_header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 __reserved2[3];
- } upiu_rsp;
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
deleted file mode 100644
index 0521f887e3ac..000000000000
--- a/drivers/scsi/ufs/unipro.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- */
-
-#ifndef _UNIPRO_H_
-#define _UNIPRO_H_
-
-/*
- * M-TX Configuration Attributes
- */
-#define TX_HIBERN8TIME_CAPABILITY 0x000F
-#define TX_MODE 0x0021
-#define TX_HSRATE_SERIES 0x0022
-#define TX_HSGEAR 0x0023
-#define TX_PWMGEAR 0x0024
-#define TX_AMPLITUDE 0x0025
-#define TX_HS_SLEWRATE 0x0026
-#define TX_SYNC_SOURCE 0x0027
-#define TX_HS_SYNC_LENGTH 0x0028
-#define TX_HS_PREPARE_LENGTH 0x0029
-#define TX_LS_PREPARE_LENGTH 0x002A
-#define TX_HIBERN8_CONTROL 0x002B
-#define TX_LCC_ENABLE 0x002C
-#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
-#define TX_BYPASS_8B10B_ENABLE 0x002E
-#define TX_DRIVER_POLARITY 0x002F
-#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
-#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
-#define TX_LCC_SEQUENCER 0x0032
-#define TX_MIN_ACTIVATETIME 0x0033
-#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
-#define TX_REFCLKFREQ 0x00EB
-#define TX_CFGCLKFREQVAL 0x00EC
-#define CFGEXTRATTR 0x00F0
-#define DITHERCTRL2 0x00F1
-
-/*
- * M-RX Configuration Attributes
- */
-#define RX_MODE 0x00A1
-#define RX_HSRATE_SERIES 0x00A2
-#define RX_HSGEAR 0x00A3
-#define RX_PWMGEAR 0x00A4
-#define RX_LS_TERMINATED_ENABLE 0x00A5
-#define RX_HS_UNTERMINATED_ENABLE 0x00A6
-#define RX_ENTER_HIBERN8 0x00A7
-#define RX_BYPASS_8B10B_ENABLE 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE 0x00A9
-#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
-#define RX_HIBERN8TIME_CAPABILITY 0x0092
-#define RX_REFCLKFREQ 0x00EB
-#define RX_CFGCLKFREQVAL 0x00EC
-#define CFGWIDEINLN 0x00F0
-#define CFGRXCDR8 0x00BA
-#define ENARXDIRECTCFG4 0x00F2
-#define CFGRXOVR8 0x00BD
-#define RXDIRECTCTRL2 0x00C7
-#define ENARXDIRECTCFG3 0x00F3
-#define RXCALCTRL 0x00B4
-#define ENARXDIRECTCFG2 0x00F4
-#define CFGRXOVR4 0x00E9
-#define RXSQCTRL 0x00B5
-#define CFGRXOVR6 0x00BF
-#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
-#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
-#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
-#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
-#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
-#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
-#define RX_ADV_GRANULARITY_CAP 0x0098
-#define RX_MIN_ACTIVATETIME_CAP 0x008F
-#define RX_HIBERN8TIME_CAP 0x0092
-#define RX_ADV_HIBERN8TIME_CAP 0x0099
-#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
-
-
-#define is_mphy_tx_attr(attr) (attr < RX_MODE)
-#define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
-#define SYNC_LEN_FINE(x) ((x) & 0x3F)
-#define SYNC_LEN_COARSE(x) ((1 << 6) | ((x) & 0x3F))
-#define PREP_LEN(x) ((x) & 0xF)
-
-#define RX_MIN_ACTIVATETIME_UNIT_US 100
-#define HIBERN8TIME_UNIT_US 100
-
-/*
- * Common Block Attributes
- */
-#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B)
-#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF)
-#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD)
-#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6)
-#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA)
-#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0)
-#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1)
-#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3)
-#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8)
-#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB)
-
-#define UNIPRO_CB_OFFSET(x) (0x8000 | x)
-
-/*
- * PHY Adapter attributes
- */
-#define PA_ACTIVETXDATALANES 0x1560
-#define PA_ACTIVERXDATALANES 0x1580
-#define PA_TXTRAILINGCLOCKS 0x1564
-#define PA_PHY_TYPE 0x1500
-#define PA_AVAILTXDATALANES 0x1520
-#define PA_AVAILRXDATALANES 0x1540
-#define PA_MINRXTRAILINGCLOCKS 0x1543
-#define PA_TXPWRSTATUS 0x1567
-#define PA_RXPWRSTATUS 0x1582
-#define PA_TXFORCECLOCK 0x1562
-#define PA_TXPWRMODE 0x1563
-#define PA_LEGACYDPHYESCDL 0x1570
-#define PA_MAXTXSPEEDFAST 0x1521
-#define PA_MAXTXSPEEDSLOW 0x1522
-#define PA_MAXRXSPEEDFAST 0x1541
-#define PA_MAXRXSPEEDSLOW 0x1542
-#define PA_TXLINKSTARTUPHS 0x1544
-#define PA_LOCAL_TX_LCC_ENABLE 0x155E
-#define PA_TXSPEEDFAST 0x1565
-#define PA_TXSPEEDSLOW 0x1566
-#define PA_REMOTEVERINFO 0x15A0
-#define PA_TXGEAR 0x1568
-#define PA_TXTERMINATION 0x1569
-#define PA_HSSERIES 0x156A
-#define PA_PWRMODE 0x1571
-#define PA_RXGEAR 0x1583
-#define PA_RXTERMINATION 0x1584
-#define PA_MAXRXPWMGEAR 0x1586
-#define PA_MAXRXHSGEAR 0x1587
-#define PA_RXHSUNTERMCAP 0x15A5
-#define PA_RXLSTERMCAP 0x15A6
-#define PA_GRANULARITY 0x15AA
-#define PA_PACPREQTIMEOUT 0x1590
-#define PA_PACPREQEOBTIMEOUT 0x1591
-#define PA_HIBERN8TIME 0x15A7
-#define PA_LOCALVERINFO 0x15A9
-#define PA_GRANULARITY 0x15AA
-#define PA_TACTIVATE 0x15A8
-#define PA_PACPFRAMECOUNT 0x15C0
-#define PA_PACPERRORCOUNT 0x15C1
-#define PA_PHYTESTCONTROL 0x15C2
-#define PA_PWRMODEUSERDATA0 0x15B0
-#define PA_PWRMODEUSERDATA1 0x15B1
-#define PA_PWRMODEUSERDATA2 0x15B2
-#define PA_PWRMODEUSERDATA3 0x15B3
-#define PA_PWRMODEUSERDATA4 0x15B4
-#define PA_PWRMODEUSERDATA5 0x15B5
-#define PA_PWRMODEUSERDATA6 0x15B6
-#define PA_PWRMODEUSERDATA7 0x15B7
-#define PA_PWRMODEUSERDATA8 0x15B8
-#define PA_PWRMODEUSERDATA9 0x15B9
-#define PA_PWRMODEUSERDATA10 0x15BA
-#define PA_PWRMODEUSERDATA11 0x15BB
-#define PA_CONNECTEDTXDATALANES 0x1561
-#define PA_CONNECTEDRXDATALANES 0x1581
-#define PA_LOGICALLANEMAP 0x15A1
-#define PA_SLEEPNOCONFIGTIME 0x15A2
-#define PA_STALLNOCONFIGTIME 0x15A3
-#define PA_SAVECONFIGTIME 0x15A4
-#define PA_TXHSADAPTTYPE 0x15D4
-
+/* Adapt type for the PA_TXHSADAPTTYPE attribute */
-#define PA_REFRESH_ADAPT 0x00
-#define PA_INITIAL_ADAPT 0x01
-#define PA_NO_ADAPT 0x03
-
-#define PA_TACTIVATE_TIME_UNIT_US 10
-#define PA_HIBERN8_TIME_UNIT_US 100
-
-/*Other attributes*/
-#define VS_MPHYCFGUPDT 0xD085
-#define VS_DEBUGOMC 0xD09E
-#define VS_POWERSTATE 0xD083
-
-#define PA_GRANULARITY_MIN_VAL 1
-#define PA_GRANULARITY_MAX_VAL 6
-
-/* PHY Adapter Protocol Constants */
-#define PA_MAXDATALANES 4
-
-#define DL_FC0ProtectionTimeOutVal_Default 8191
-#define DL_TC0ReplayTimeOutVal_Default 65535
-#define DL_AFC0ReqTimeOutVal_Default 32767
-#define DL_FC1ProtectionTimeOutVal_Default 8191
-#define DL_TC1ReplayTimeOutVal_Default 65535
-#define DL_AFC1ReqTimeOutVal_Default 32767
-
-#define DME_LocalFC0ProtectionTimeOutVal 0xD041
-#define DME_LocalTC0ReplayTimeOutVal 0xD042
-#define DME_LocalAFC0ReqTimeOutVal 0xD043
-
-/* PA power modes */
-enum {
- FAST_MODE = 1,
- SLOW_MODE = 2,
- FASTAUTO_MODE = 4,
- SLOWAUTO_MODE = 5,
- UNCHANGED = 7,
-};
-
-#define PWRMODE_MASK 0xF
-#define PWRMODE_RX_OFFSET 4
-
-/* PA TX/RX Frequency Series */
-enum {
- PA_HS_MODE_A = 1,
- PA_HS_MODE_B = 2,
-};
-
-enum ufs_pwm_gear_tag {
- UFS_PWM_DONT_CHANGE, /* Don't change Gear */
- UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
- UFS_PWM_G2, /* PWM Gear 2 */
- UFS_PWM_G3, /* PWM Gear 3 */
- UFS_PWM_G4, /* PWM Gear 4 */
- UFS_PWM_G5, /* PWM Gear 5 */
- UFS_PWM_G6, /* PWM Gear 6 */
- UFS_PWM_G7, /* PWM Gear 7 */
-};
-
-enum ufs_hs_gear_tag {
- UFS_HS_DONT_CHANGE, /* Don't change Gear */
- UFS_HS_G1, /* HS Gear 1 (default for reset) */
- UFS_HS_G2, /* HS Gear 2 */
- UFS_HS_G3, /* HS Gear 3 */
- UFS_HS_G4, /* HS Gear 4 */
-};
-
-enum ufs_unipro_ver {
- UFS_UNIPRO_VER_RESERVED = 0,
- UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
- UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
- UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
- UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */
- UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */
- UFS_UNIPRO_VER_MAX = 6, /* UniPro unsupported version */
- /* UniPro version field mask in PA_LOCALVERINFO */
- UFS_UNIPRO_VER_MASK = 0xF,
-};
-
-/*
- * Data Link Layer Attributes
- */
-#define DL_TC0TXFCTHRESHOLD 0x2040
-#define DL_FC0PROTTIMEOUTVAL 0x2041
-#define DL_TC0REPLAYTIMEOUTVAL 0x2042
-#define DL_AFC0REQTIMEOUTVAL 0x2043
-#define DL_AFC0CREDITTHRESHOLD 0x2044
-#define DL_TC0OUTACKTHRESHOLD 0x2045
-#define DL_TC1TXFCTHRESHOLD 0x2060
-#define DL_FC1PROTTIMEOUTVAL 0x2061
-#define DL_TC1REPLAYTIMEOUTVAL 0x2062
-#define DL_AFC1REQTIMEOUTVAL 0x2063
-#define DL_AFC1CREDITTHRESHOLD 0x2064
-#define DL_TC1OUTACKTHRESHOLD 0x2065
-#define DL_TXPREEMPTIONCAP 0x2000
-#define DL_TC0TXMAXSDUSIZE 0x2001
-#define DL_TC0RXINITCREDITVAL 0x2002
-#define DL_TC0TXBUFFERSIZE 0x2005
-#define DL_PEERTC0PRESENT 0x2046
-#define DL_PEERTC0RXINITCREVAL 0x2047
-#define DL_TC1TXMAXSDUSIZE 0x2003
-#define DL_TC1RXINITCREDITVAL 0x2004
-#define DL_TC1TXBUFFERSIZE 0x2006
-#define DL_PEERTC1PRESENT 0x2066
-#define DL_PEERTC1RXINITCREVAL 0x2067
-
-/*
- * Network Layer Attributes
- */
-#define N_DEVICEID 0x3000
-#define N_DEVICEID_VALID 0x3001
-#define N_TC0TXMAXSDUSIZE 0x3020
-#define N_TC1TXMAXSDUSIZE 0x3021
-
-/*
- * Transport Layer Attributes
- */
-#define T_NUMCPORTS 0x4000
-#define T_NUMTESTFEATURES 0x4001
-#define T_CONNECTIONSTATE 0x4020
-#define T_PEERDEVICEID 0x4021
-#define T_PEERCPORTID 0x4022
-#define T_TRAFFICCLASS 0x4023
-#define T_PROTOCOLID 0x4024
-#define T_CPORTFLAGS 0x4025
-#define T_TXTOKENVALUE 0x4026
-#define T_RXTOKENVALUE 0x4027
-#define T_LOCALBUFFERSPACE 0x4028
-#define T_PEERBUFFERSPACE 0x4029
-#define T_CREDITSTOSEND 0x402A
-#define T_CPORTMODE 0x402B
-#define T_TC0TXMAXSDUSIZE 0x4060
-#define T_TC1TXMAXSDUSIZE 0x4061
-
-/* CPort setting */
-#define E2EFC_ON (1 << 0)
-#define E2EFC_OFF (0 << 0)
-#define CSD_N_ON (0 << 1)
-#define CSD_N_OFF (1 << 1)
-#define CSV_N_ON (0 << 2)
-#define CSV_N_OFF (1 << 2)
-#define CPORT_DEF_FLAGS (CSV_N_OFF | CSD_N_OFF | E2EFC_OFF)
-
-/* CPort connection state */
-enum {
- CPORT_IDLE = 0,
- CPORT_CONNECTED,
-};
-
-#endif /* _UNIPRO_H_ */
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index ec58091fc948..c0c4f895d76e 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -510,10 +510,8 @@ static int qcom_slim_probe(struct platform_device *pdev)
}
ctrl->irq = platform_get_irq(pdev, 0);
- if (ctrl->irq < 0) {
- dev_err(&pdev->dev, "no slimbus IRQ\n");
+ if (ctrl->irq < 0)
return ctrl->irq;
- }
sctrl = &ctrl->ctrl;
sctrl->dev = &pdev->dev;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 7040293c2ee8..0aa8408464ad 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1434,6 +1434,7 @@ static int of_qcom_slim_ngd_register(struct device *parent,
const struct of_device_id *match;
struct device_node *node;
u32 id;
+ int ret;
match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
data = match->data;
@@ -1455,7 +1456,17 @@ static int of_qcom_slim_ngd_register(struct device *parent,
}
ngd->id = id;
ngd->pdev->dev.parent = parent;
- ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+
+ ret = driver_set_override(&ngd->pdev->dev,
+ &ngd->pdev->driver_override,
+ QCOM_SLIM_NGD_DRV_NAME,
+ strlen(QCOM_SLIM_NGD_DRV_NAME));
+ if (ret) {
+ platform_device_put(ngd->pdev);
+ kfree(ngd);
+ of_node_put(node);
+ return ret;
+ }
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
@@ -1526,13 +1537,11 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "no slimbus IRQ resource\n");
- return -ENODEV;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+ ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
if (ret) {
dev_err(&pdev->dev, "request IRQ failed\n");
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c5aae42673d3..86ccf5970bc1 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -14,6 +14,7 @@ source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/litex/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/microchip/Kconfig"
+source "drivers/soc/pxa/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index e8228c4e5d18..919716e0e700 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
obj-y += microchip/
+obj-y += pxa/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 9154c7029b05..291086bb9313 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -459,7 +459,7 @@ static const struct of_device_id ixp4xx_qmgr_of_match[] = {
static struct platform_driver ixp4xx_qmgr_driver = {
.driver = {
.name = "ixp4xx-qmgr",
- .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ .of_match_table = ixp4xx_qmgr_of_match,
},
.probe = ixp4xx_qmgr_probe,
.remove = ixp4xx_qmgr_remove,
diff --git a/drivers/soc/pxa/Kconfig b/drivers/soc/pxa/Kconfig
new file mode 100644
index 000000000000..c5c265aa4f07
--- /dev/null
+++ b/drivers/soc/pxa/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config PLAT_PXA
+ bool
+
+config PXA_SSP
+ tristate
+ help
+ Enable support for PXA2xx SSP ports
diff --git a/drivers/soc/pxa/Makefile b/drivers/soc/pxa/Makefile
new file mode 100644
index 000000000000..413deceddbdd
--- /dev/null
+++ b/drivers/soc/pxa/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PXA3xx) += mfp.o
+obj-$(CONFIG_ARCH_MMP) += mfp.o
+
+obj-$(CONFIG_PXA_SSP) += ssp.o
diff --git a/drivers/soc/pxa/mfp.c b/drivers/soc/pxa/mfp.c
new file mode 100644
index 000000000000..6220ba321cfc
--- /dev/null
+++ b/drivers/soc/pxa/mfp.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/plat-pxa/mfp.c
+ *
+ * Multi-Function Pin Support
+ *
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * 2007-08-21: eric miao <eric.miao@marvell.com>
+ * initial version
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <linux/soc/pxa/mfp.h>
+
+#define MFPR_SIZE (PAGE_SIZE)
+
+/* MFPR register bit definitions */
+#define MFPR_PULL_SEL (0x1 << 15)
+#define MFPR_PULLUP_EN (0x1 << 14)
+#define MFPR_PULLDOWN_EN (0x1 << 13)
+#define MFPR_SLEEP_SEL (0x1 << 9)
+#define MFPR_SLEEP_OE_N (0x1 << 7)
+#define MFPR_EDGE_CLEAR (0x1 << 6)
+#define MFPR_EDGE_FALL_EN (0x1 << 5)
+#define MFPR_EDGE_RISE_EN (0x1 << 4)
+
+#define MFPR_SLEEP_DATA(x) ((x) << 8)
+#define MFPR_DRIVE(x) (((x) & 0x7) << 10)
+#define MFPR_AF_SEL(x) (((x) & 0x7) << 0)
+
+#define MFPR_EDGE_NONE (0)
+#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN)
+#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN)
+#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL)
+
+/*
+ * Table that determines the low power mode outputs, with the actual settings
+ * used in parentheses for don't-care values. Except for the float output,
+ * the configured driven and pulled levels match, so if there is a need for
+ * non-LPM pulled output, the same configuration could probably be used.
+ *
+ * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel
+ * (bit 7) (bit 8) (bit 14) (bit 13) (bit 15)
+ *
+ * Input 0 X(0) X(0) X(0) 0
+ * Drive 0 0 0 0 X(1) 0
+ * Drive 1 0 1 X(1) 0 0
+ * Pull hi (1) 1 X(1) 1 0 0
+ * Pull lo (0) 1 X(0) 0 1 0
+ * Z (float) 1 X(0) 0 0 0
+ */
+#define MFPR_LPM_INPUT (0)
+#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN)
+#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN)
+#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N)
+#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N)
+#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N)
+#define MFPR_LPM_MASK (0xe080)
+
+/*
+ * The pullup and pulldown state of the MFP pin at run mode is by default
+ * determined by the selected alternate function. In case some buggy
+ * devices need to override this default behavior, the definitions below
+ * indicate the settings of the corresponding MFPR bits.
+ *
+ * Definition pull_sel pullup_en pulldown_en
+ * MFPR_PULL_NONE 0 0 0
+ * MFPR_PULL_LOW 1 0 1
+ * MFPR_PULL_HIGH 1 1 0
+ * MFPR_PULL_BOTH 1 1 1
+ * MFPR_PULL_FLOAT 1 0 0
+ */
+#define MFPR_PULL_NONE (0)
+#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN)
+#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN)
+#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN)
+#define MFPR_PULL_FLOAT (MFPR_PULL_SEL)
+
+/* mfp_spin_lock is used to ensure that MFP register configuration
+ * (most likely a read-modify-write operation) is atomic, and that
+ * mfp_table[] is consistent
+ */
+static DEFINE_SPINLOCK(mfp_spin_lock);
+
+static void __iomem *mfpr_mmio_base;
+
+struct mfp_pin {
+ unsigned long config; /* -1 for not configured */
+ unsigned long mfpr_off; /* MFPRxx Register offset */
+ unsigned long mfpr_run; /* Run-Mode Register Value */
+ unsigned long mfpr_lpm; /* Low Power Mode Register Value */
+};
+
+static struct mfp_pin mfp_table[MFP_PIN_MAX];
+
+/* mapping of MFP_LPM_* definitions to MFPR_LPM_* register bits */
+static const unsigned long mfpr_lpm[] = {
+ MFPR_LPM_INPUT,
+ MFPR_LPM_DRIVE_LOW,
+ MFPR_LPM_DRIVE_HIGH,
+ MFPR_LPM_PULL_LOW,
+ MFPR_LPM_PULL_HIGH,
+ MFPR_LPM_FLOAT,
+ MFPR_LPM_INPUT,
+};
+
+/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */
+static const unsigned long mfpr_pull[] = {
+ MFPR_PULL_NONE,
+ MFPR_PULL_LOW,
+ MFPR_PULL_HIGH,
+ MFPR_PULL_BOTH,
+ MFPR_PULL_FLOAT,
+};
+
+/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */
+static const unsigned long mfpr_edge[] = {
+ MFPR_EDGE_NONE,
+ MFPR_EDGE_RISE,
+ MFPR_EDGE_FALL,
+ MFPR_EDGE_BOTH,
+};
+
+#define mfpr_readl(off) \
+ __raw_readl(mfpr_mmio_base + (off))
+
+#define mfpr_writel(off, val) \
+ __raw_writel(val, mfpr_mmio_base + (off))
+
+#define mfp_configured(p) ((p)->config != -1)
+
+/*
+ * perform a read-back of any valid MFPR register to make sure the
+ * previous writes have completed
+ */
+static unsigned long mfpr_off_readback;
+#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
+
+static inline void __mfp_config_run(struct mfp_pin *p)
+{
+ if (mfp_configured(p))
+ mfpr_writel(p->mfpr_off, p->mfpr_run);
+}
+
+static inline void __mfp_config_lpm(struct mfp_pin *p)
+{
+ if (mfp_configured(p)) {
+ unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR;
+ if (mfpr_clr != p->mfpr_run)
+ mfpr_writel(p->mfpr_off, mfpr_clr);
+ if (p->mfpr_lpm != mfpr_clr)
+ mfpr_writel(p->mfpr_off, p->mfpr_lpm);
+ }
+}
+
+void mfp_config(unsigned long *mfp_cfgs, int num)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+
+ for (i = 0; i < num; i++, mfp_cfgs++) {
+ unsigned long tmp, c = *mfp_cfgs;
+ struct mfp_pin *p;
+ int pin, af, drv, lpm, edge, pull;
+
+ pin = MFP_PIN(c);
+ BUG_ON(pin >= MFP_PIN_MAX);
+ p = &mfp_table[pin];
+
+ af = MFP_AF(c);
+ drv = MFP_DS(c);
+ lpm = MFP_LPM_STATE(c);
+ edge = MFP_LPM_EDGE(c);
+ pull = MFP_PULL(c);
+
+		/* Run-mode pull settings conflict with the MFPR bits of the
+		 * low power mode state, so calculate mfpr_run and mfpr_lpm
+		 * individually if pull != MFP_PULL_NONE.
+ */
+ tmp = MFPR_AF_SEL(af) | MFPR_DRIVE(drv);
+
+ if (likely(pull == MFP_PULL_NONE)) {
+ p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
+ p->mfpr_lpm = p->mfpr_run;
+ } else {
+ p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
+ p->mfpr_run = tmp | mfpr_pull[pull];
+ }
+
+ p->config = c; __mfp_config_run(p);
+ }
+
+ mfpr_sync();
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+unsigned long mfp_read(int mfp)
+{
+ unsigned long val, flags;
+
+ BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+ val = mfpr_readl(mfp_table[mfp].mfpr_off);
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+
+ return val;
+}
+
+void mfp_write(int mfp, unsigned long val)
+{
+ unsigned long flags;
+
+ BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+ mfpr_writel(mfp_table[mfp].mfpr_off, val);
+ mfpr_sync();
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+void __init mfp_init_base(void __iomem *mfpr_base)
+{
+ int i;
+
+ /* initialize the table with default - unconfigured */
+ for (i = 0; i < ARRAY_SIZE(mfp_table); i++)
+ mfp_table[i].config = -1;
+
+ mfpr_mmio_base = mfpr_base;
+}
+
+void __init mfp_init_addr(struct mfp_addr_map *map)
+{
+ struct mfp_addr_map *p;
+ unsigned long offset, flags;
+ int i;
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+
+ /* mfp offset for readback */
+ mfpr_off_readback = map[0].offset;
+
+ for (p = map; p->start != MFP_PIN_INVALID; p++) {
+ offset = p->offset;
+ i = p->start;
+
+ do {
+ mfp_table[i].mfpr_off = offset;
+ mfp_table[i].mfpr_run = 0;
+ mfp_table[i].mfpr_lpm = 0;
+ offset += 4; i++;
+ } while ((i <= p->end) && (p->end != -1));
+ }
+
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+void mfp_config_lpm(void)
+{
+ struct mfp_pin *p = &mfp_table[0];
+ int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
+ __mfp_config_lpm(p);
+}
+
+void mfp_config_run(void)
+{
+ struct mfp_pin *p = &mfp_table[0];
+ int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
+ __mfp_config_run(p);
+}
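For context, mfp_config() above consumes an array of packed pin-configuration words and, under mfp_spin_lock, writes each pin's run-mode MFPR value before the final read-back sync. A minimal, hypothetical board-init sketch follows; the pin entries, function selects and header path are assumptions for illustration, not part of this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/soc/pxa/mfp.h>

/* hypothetical pin list built with the MFP_CFG* helpers */
static unsigned long board_pin_config[] __initdata = {
	MFP_CFG(GPIO19, AF2),	/* placeholder pin/alternate-function pair */
};

static void __init board_init_pins(void)
{
	/* programs every entry's run-mode value and syncs with a read-back */
	mfp_config(board_pin_config, ARRAY_SIZE(board_pin_config));
}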
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
new file mode 100644
index 000000000000..563440315acd
--- /dev/null
+++ b/drivers/soc/pxa/ssp.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mach-pxa/ssp.c
+ *
+ * based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
+ *
+ * Copyright (C) 2003 Russell King.
+ * Copyright (C) 2003 Wolfson Microelectronics PLC
+ *
+ * PXA2xx SSP driver. This provides the generic core for simple
+ * IO-based SSP applications and allows easy port setup for DMA access.
+ *
+ * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/irq.h>
+
+static DEFINE_MUTEX(ssp_lock);
+static LIST_HEAD(ssp_list);
+
+struct ssp_device *pxa_ssp_request(int port, const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->port_id == port && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request);
+
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+ const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->of_node == of_node && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request_of);
+
+void pxa_ssp_free(struct ssp_device *ssp)
+{
+ mutex_lock(&ssp_lock);
+ if (ssp->use_count) {
+ ssp->use_count--;
+ ssp->label = NULL;
+ } else
+ dev_err(ssp->dev, "device already free\n");
+ mutex_unlock(&ssp_lock);
+}
+EXPORT_SYMBOL(pxa_ssp_free);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+ { .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
+ { .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
+ { .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
+ { .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
+ { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
+ { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
+ { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
+#endif
+
+static int pxa_ssp_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct ssp_device *ssp;
+ struct device *dev = &pdev->dev;
+
+ ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
+ if (ssp == NULL)
+ return -ENOMEM;
+
+ ssp->dev = dev;
+
+ ssp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "failed to request memory resource\n");
+ return -EBUSY;
+ }
+
+ ssp->phys_base = res->start;
+
+ ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (ssp->mmio_base == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
+ }
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0) {
+ dev_err(dev, "no IRQ resource defined\n");
+ return -ENODEV;
+ }
+
+ if (dev->of_node) {
+ const struct of_device_id *id =
+ of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
+ ssp->type = (int) id->data;
+ } else {
+ const struct platform_device_id *id =
+ platform_get_device_id(pdev);
+ ssp->type = (int) id->driver_data;
+
+ /* PXA2xx/3xx SSP ports start from 1 and the internal pdev->id
+ * starts from 0, do a translation here
+ */
+ ssp->port_id = pdev->id + 1;
+ }
+
+ ssp->use_count = 0;
+ ssp->of_node = dev->of_node;
+
+ mutex_lock(&ssp_lock);
+ list_add(&ssp->node, &ssp_list);
+ mutex_unlock(&ssp_lock);
+
+ platform_set_drvdata(pdev, ssp);
+
+ return 0;
+}
+
+static int pxa_ssp_remove(struct platform_device *pdev)
+{
+ struct ssp_device *ssp;
+
+ ssp = platform_get_drvdata(pdev);
+ if (ssp == NULL)
+ return -ENODEV;
+
+ mutex_lock(&ssp_lock);
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+
+ return 0;
+}
+
+static const struct platform_device_id ssp_id_table[] = {
+ { "pxa25x-ssp", PXA25x_SSP },
+ { "pxa25x-nssp", PXA25x_NSSP },
+ { "pxa27x-ssp", PXA27x_SSP },
+ { "pxa3xx-ssp", PXA3xx_SSP },
+ { "pxa168-ssp", PXA168_SSP },
+ { "pxa910-ssp", PXA910_SSP },
+ { },
+};
+
+static struct platform_driver pxa_ssp_driver = {
+ .probe = pxa_ssp_probe,
+ .remove = pxa_ssp_remove,
+ .driver = {
+ .name = "pxa2xx-ssp",
+ .of_match_table = of_match_ptr(pxa_ssp_of_ids),
+ },
+ .id_table = ssp_id_table,
+};
+
+static int __init pxa_ssp_init(void)
+{
+ return platform_driver_register(&pxa_ssp_driver);
+}
+
+static void __exit pxa_ssp_exit(void)
+{
+ platform_driver_unregister(&pxa_ssp_driver);
+}
+
+arch_initcall(pxa_ssp_init);
+module_exit(pxa_ssp_exit);
+
+MODULE_DESCRIPTION("PXA SSP driver");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("GPL");
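Clients of this core (the pxa2xx SPI and audio drivers, for instance) look up a port from the list populated in pxa_ssp_probe(). A rough usage sketch, with the port number and label as placeholders:

#include <linux/pxa2xx_ssp.h>

static int example_claim_ssp(void)
{
	struct ssp_device *ssp;

	ssp = pxa_ssp_request(1, "example-client");	/* port 1 is hypothetical */
	if (!ssp)
		return -ENODEV;	/* port missing or already in use */

	/*
	 * ssp->mmio_base, ssp->irq and ssp->clk are now available to the
	 * caller; use_count keeps a second pxa_ssp_request() from succeeding.
	 */

	pxa_ssp_free(ssp);	/* release the port when done */
	return 0;
}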
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 384461b70684..15a3970e3509 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -165,12 +165,14 @@ static int __init rockchip_grf_init(void)
return -ENODEV;
if (!match || !match->data) {
pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
return -EINVAL;
}
grf_info = match->data;
grf = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
return PTR_ERR(grf);
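The grf fix pairs the reference taken when looking up the device node with of_node_put() on every exit path, including the early error returns. The general shape of that pattern, sketched with placeholder match-table and helper names:

struct device_node *np;
const struct of_device_id *match;

np = of_find_matching_node_and_match(NULL, my_match_table, &match);
if (!np)
	return -ENODEV;

/* ... use np, e.g. syscon_node_to_regmap(np) ... */

of_node_put(np);	/* drop the reference of_find_*() took */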
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index b27f8853508e..5dcb7665fe22 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -41,25 +41,37 @@ static int event_manager_availability = -EACCES;
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
static int sgi_num = XLNX_EVENT_SGI_NUM;
+static bool is_need_to_unregister;
+
+/**
+ * struct agent_cb - Registered callback function and private data.
+ * @agent_data: Data passed back to handler function.
+ * @eve_cb: Function pointer to store the callback function.
+ * @list: member used to link this entry into the callback list.
+ */
+struct agent_cb {
+ void *agent_data;
+ event_cb_func_t eve_cb;
+ struct list_head list;
+};
+
/**
* struct registered_event_data - Registered Event Data.
* @key: key is the combine id(Node-Id | Event-Id) of type u64
* where upper u32 for Node-Id and lower u32 for Event-Id,
* And this used as key to index into hashmap.
- * @agent_data: Data passed back to handler function.
* @cb_type: Type of Api callback, like PM_NOTIFY_CB, etc.
- * @eve_cb: Function pointer to store the callback function.
- * @wake: If this flag set, firmware will wakeup processor if is
+ * @wake: If this flag is set, firmware will wake up the processor if it is
* in sleep or power down state.
+ * @cb_list_head: Head of the callback data list holding the registered
+ * handlers and their private data.
* @hentry: hlist_node that hooks this entry into hashtable.
*/
struct registered_event_data {
u64 key;
enum pm_api_cb_id cb_type;
- void *agent_data;
-
- event_cb_func_t eve_cb;
bool wake;
+ struct list_head cb_list_head;
struct hlist_node hentry;
};
@@ -78,29 +90,60 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
event_cb_func_t cb_fun, void *data)
{
u64 key = 0;
+ bool present_in_hash = false;
struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
key = ((u64)node_id << 32U) | (u64)event;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
- pr_err("Found as already registered\n");
- return -EINVAL;
+ present_in_hash = true;
+ break;
}
}
- /* Add new entry if not present */
- eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
- if (!eve_data)
- return -ENOMEM;
+ if (!present_in_hash) {
+ /* Add new entry if not present in HASH table */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+ eve_data->key = key;
+ eve_data->cb_type = PM_NOTIFY_CB;
+ eve_data->wake = wake;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+
+ /* Add into HASH table */
+ hash_add(reg_driver_map, &eve_data->hentry, key);
+ } else {
+ /* Search for callback function and private data in list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ return 0;
+ }
+ }
- eve_data->key = key;
- eve_data->cb_type = PM_NOTIFY_CB;
- eve_data->eve_cb = cb_fun;
- eve_data->wake = wake;
- eve_data->agent_data = data;
+ /* Add another handler and its private data to the list */
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
- hash_add(reg_driver_map, &eve_data->hentry, key);
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+ }
return 0;
}
@@ -108,6 +151,7 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
@@ -124,8 +168,16 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
eve_data->key = 0;
eve_data->cb_type = PM_INIT_SUSPEND_CB;
- eve_data->eve_cb = cb_fun;
- eve_data->agent_data = data;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
@@ -136,15 +188,26 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ is_need_to_unregister = false;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
- if (eve_data->cb_type == PM_INIT_SUSPEND_CB &&
- eve_data->eve_cb == cb_fun) {
- is_callback_found = true;
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ /* Delete matching callbacks from the list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
/* remove an object from a hashtable */
hash_del(&eve_data->hentry);
kfree(eve_data);
+ is_need_to_unregister = true;
}
}
if (!is_callback_found) {
@@ -156,20 +219,36 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
}
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
- event_cb_func_t cb_fun)
+ event_cb_func_t cb_fun, void *data)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
u64 key = ((u64)node_id << 32U) | (u64)event;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ is_need_to_unregister = false;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
- if (eve_data->key == key &&
- eve_data->eve_cb == cb_fun) {
- is_callback_found = true;
- /* remove an object from a hashtable */
- hash_del(&eve_data->hentry);
- kfree(eve_data);
+ if (eve_data->key == key) {
+ /* Delete matching callbacks from the list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
+
+ /* Remove the hash table entry if the callback list is empty */
+ if (list_empty(&eve_data->cb_list_head)) {
+ /* remove an object from a HASH table */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ is_need_to_unregister = true;
+ }
}
}
if (!is_callback_found) {
@@ -241,7 +320,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
}
@@ -263,10 +342,10 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
} else {
- xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
}
return ret;
}
@@ -284,15 +363,18 @@ EXPORT_SYMBOL_GPL(xlnx_register_event);
* @node_id: Node-Id related to event.
* @event: Event Mask for the Error Event.
* @cb_fun: Function pointer of callback function.
+ * @data: Pointer of agent's private data.
*
* Return: Returns 0 on successful unregistration else error code.
*/
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
- event_cb_func_t cb_fun)
+ event_cb_func_t cb_fun, void *data)
{
- int ret;
+ int ret = 0;
u32 eve, pos;
+ is_need_to_unregister = false;
+
if (event_manager_availability)
return event_manager_availability;
@@ -309,23 +391,26 @@ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, co
} else {
/* Remove Node-Id/Event from hash table */
if (!xlnx_is_error_event(node_id)) {
- xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
} else {
for (pos = 0; pos < MAX_BITS; pos++) {
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
- /* Un-register for Node-Id/Event combination */
- ret = zynqmp_pm_register_notifier(node_id, event, false, false);
- if (ret) {
- pr_err("%s() failed for 0x%x and 0x%x: %d\n",
- __func__, node_id, event, ret);
- return ret;
+ /* Un-register if list is empty */
+ if (is_need_to_unregister) {
+ /* Un-register for Node-Id/Event combination */
+ ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+ __func__, node_id, event, ret);
+ return ret;
+ }
}
}
@@ -338,12 +423,16 @@ static void xlnx_call_suspend_cb_handler(const u32 *payload)
bool is_callback_found = false;
struct registered_event_data *eve_data;
u32 cb_type = payload[0];
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
if (eve_data->cb_type == cb_type) {
- eve_data->eve_cb(&payload[0], eve_data->agent_data);
- is_callback_found = true;
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
}
}
if (!is_callback_found)
@@ -356,12 +445,16 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
struct registered_event_data *eve_data;
u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
- eve_data->eve_cb(&payload[0], eve_data->agent_data);
- is_callback_found = true;
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
/* re register with firmware to get future events */
ret = zynqmp_pm_register_notifier(payload[1], payload[2],
@@ -369,9 +462,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
payload[1], payload[2], ret);
- /* Remove already registered event from hash table */
- xlnx_remove_cb_for_notify_event(payload[1], payload[2],
- eve_data->eve_cb);
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
+ list) {
+ /* Remove already registered event from hash table */
+ xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+ cb_pos->eve_cb,
+ cb_pos->agent_data);
+ }
}
}
}
@@ -572,8 +669,14 @@ static int xlnx_event_manager_remove(struct platform_device *pdev)
struct registered_event_data *eve_data;
struct hlist_node *tmp;
int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
hash_del(&eve_data->hentry);
kfree(eve_data);
}
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 859dd31b6eff..78a8a7545d1e 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -208,7 +208,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!zynqmp_pm_init_suspend_work) {
xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
- suspend_event_callback);
+ suspend_event_callback, NULL);
return -ENOMEM;
}
event_registered = true;
@@ -263,7 +263,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (ret) {
if (event_registered) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
+ NULL);
event_registered = false;
}
dev_err(&pdev->dev, "unable to create sysfs interface\n");
@@ -277,7 +278,7 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (event_registered)
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
if (!rx_chan)
mbox_free_channel(rx_chan);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 354d3f89366f..a2bfb0434a67 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -536,11 +536,9 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(&slave->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
ret = sdw_nread_no_pm(slave, addr, count, val);
@@ -562,11 +560,9 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(&slave->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
ret = sdw_nwrite_no_pm(slave, addr, count, val);
@@ -1506,10 +1502,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
- ret = pm_runtime_get_sync(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
if (ret < 0 && ret != -EACCES) {
dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
- pm_runtime_put_noidle(&slave->dev);
return ret;
}
@@ -1838,6 +1833,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
__func__, slave->dev_num);
complete(&slave->initialization_complete);
+
+ /*
+ * If the manager became pm_runtime active, the peripherals will be
+ * restarted and attach, but their pm_runtime status may remain
+ * suspended. If the 'update_slave_status' callback initiates
+ * any sort of deferred processing, this processing would not be
+ * cancelled on pm_runtime suspend.
+ * To avoid such zombie states, we queue a request to resume.
+ * This would be a no-op in case the peripheral was being resumed
+ * by e.g. the ALSA/ASoC framework.
+ */
+ pm_request_resume(&slave->dev);
}
}
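The SoundWire conversions in this and the following files all rely on the same property: pm_runtime_resume_and_get() already drops the usage count when the resume fails, so the explicit pm_runtime_put_noidle() in the error path becomes redundant. The shape of the change, schematically:

/* before: get_sync raises the usage count even when resume fails */
ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES) {
	pm_runtime_put_noidle(dev);
	return ret;
}

/* after: resume_and_get puts the count back itself on failure */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0 && ret != -EACCES)
	return ret;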
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 558390af44b6..4fbb19557f5e 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -386,12 +386,11 @@ static int cdns_parity_error_injection(void *data, u64 value)
* Resume Master device. If this results in a bus reset, the
* Slave devices will re-attach and be re-enumerated.
*/
- ret = pm_runtime_get_sync(bus->dev);
+ ret = pm_runtime_resume_and_get(bus->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(cdns->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(bus->dev);
return ret;
}
@@ -959,6 +958,8 @@ static void cdns_update_slave_status_work(struct work_struct *work)
container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
u64 slave_intstat;
+ u32 device0_status;
+ int retry_count = 0;
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
@@ -968,10 +969,45 @@ static void cdns_update_slave_status_work(struct work_struct *work)
dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
+update_status:
cdns_update_slave_status(cdns, slave_intstat);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+ /*
+ * When there is more than one peripheral per link, it's
+ * possible that a deviceB becomes attached after we deal with
+ * the attachment of deviceA. Since the hardware does a
+ * logical AND, the attachment of the second device does not
+ * change the status seen by the driver.
+ *
+ * In that case, clearing the registers above would result in
+ * the deviceB never being detected - until a change of status
+ * is observed on the bus.
+ *
+ * To avoid this race condition, re-check if any device0 needs
+ * attention with PING commands. There is no need to check for
+ * ALERTS since they are not allowed until a non-zero
+ * device_number is assigned.
+ */
+
+ device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+ device0_status &= 3;
+
+ if (device0_status == SDW_SLAVE_ATTACHED) {
+ if (retry_count++ < SDW_MAX_DEVICES) {
+ dev_dbg_ratelimited(cdns->dev,
+ "Device0 detected after clearing status, iteration %d\n",
+ retry_count);
+ slave_intstat = CDNS_MCP_SLAVE_INTSTAT_ATTACHED;
+ goto update_status;
+ } else {
+ dev_err_ratelimited(cdns->dev,
+ "Device0 detected after %d iterations\n",
+ retry_count);
+ }
+ }
+
/* clear and unmask Slave interrupt now */
cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
cdns_updatel(cdns, CDNS_MCP_INTMASK,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 63101f1ba271..505c5ef061e3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -799,12 +799,11 @@ static int intel_startup(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
int ret;
- ret = pm_runtime_get_sync(cdns->dev);
+ ret = pm_runtime_resume_and_get(cdns->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(cdns->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(cdns->dev);
return ret;
}
return 0;
@@ -1293,6 +1292,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
+ /* avoid resuming from pm_runtime suspend if it's not required */
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
@@ -1828,6 +1830,9 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
return 0;
}
+ /* unconditionally disable WAKEEN interrupt */
+ intel_shim_wake(sdw, false);
+
link_flags = md_flags >> (bus->link_id * 8);
multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index da1ad7ebb1aa..22b706350ead 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -105,7 +105,7 @@
#define SWRM_SPECIAL_CMD_ID 0xF
#define MAX_FREQ_NUM 1
-#define TIMEOUT_MS (2 * HZ)
+#define TIMEOUT_MS 100
#define QCOM_SWRM_MAX_RD_LEN 0x1
#define QCOM_SDW_MAX_PORTS 14
#define DEFAULT_CLK_FREQ 9600000
@@ -510,12 +510,12 @@ static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
struct qcom_swrm_ctrl *swrm = dev_id;
int ret;
- ret = pm_runtime_get_sync(swrm->dev);
+ ret = pm_runtime_resume_and_get(swrm->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(swrm->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(swrm->dev);
+ return ret;
}
if (swrm->wake_irq > 0) {
@@ -1058,12 +1058,11 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int ret, i;
- ret = pm_runtime_get_sync(ctrl->dev);
+ ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(ctrl->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(ctrl->dev);
return ret;
}
@@ -1252,12 +1251,12 @@ static int swrm_reg_show(struct seq_file *s_file, void *data)
struct qcom_swrm_ctrl *swrm = s_file->private;
int reg, reg_val, ret;
- ret = pm_runtime_get_sync(swrm->dev);
+ ret = pm_runtime_resume_and_get(swrm->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(swrm->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(swrm->dev);
+ return ret;
}
for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
@@ -1452,7 +1451,7 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
} while (retry--);
dev_err(swrm->dev, "%s: link status not %s\n", __func__,
- comp_sts && SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
+ comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
return false;
}
@@ -1549,6 +1548,7 @@ static const struct dev_pm_ops swrm_dev_pm_ops = {
static const struct of_device_id qcom_swrm_of_match[] = {
{ .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
+ { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_5_data },
{/* sentinel */},
};
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f273459b2023..d34150559142 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -822,6 +822,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
} else if (multi_link) {
dev_err(bus->dev,
"Post bank switch ops not implemented\n");
+ ret = -EINVAL;
goto error;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b9e2c7e7c580..ea09d1b42bf6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -71,29 +71,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
- const char *end = memchr(buf, '\n', count);
- const size_t len = end ? end - buf : count;
- const char *driver_override, *old;
-
- /* We need to keep extra room for a newline when displaying value */
- if (len >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, len, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
+ int ret;
- device_lock(dev);
- old = spi->driver_override;
- if (len) {
- spi->driver_override = driver_override;
- } else {
- /* Empty string, disable driver override */
- spi->driver_override = NULL;
- kfree(driver_override);
- }
- device_unlock(dev);
- kfree(old);
+ ret = driver_set_override(dev, &spi->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
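driver_set_override() is the drivers/base helper these bus conversions switch to; it takes over the newline trimming, the length check and freeing of the previous string. A hedged sketch of the paired sysfs show side that buses keep alongside it; the field access and sysfs_emit() usage follow the SPI case but are illustrative, not lifted from this patch:

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);	/* serialize against concurrent store */
	len = sysfs_emit(buf, "%s\n", spi->driver_override);
	device_unlock(dev);
	return len;
}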
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index fc274737053d..0a993c47273e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -64,8 +64,6 @@ source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/unisys/Kconfig"
-
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -86,5 +84,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/vme_user/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 65e317922e3f..2800ab9b2d1d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_VME_BUS) += vme/
+obj-$(CONFIG_VME_BUS) += vme_user/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
@@ -22,7 +22,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index a344410e48fe..cd86b9c9e345 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
goto err_device;
return cd;
err_device:
- device_unregister(&cd->client->dev);
+ put_device(&cd->client->dev);
err_kthread:
kthread_stop(cd->qthread);
err_reset:
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index bbf3ba744fc4..45afa208d004 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -445,7 +445,7 @@ static int __maybe_unused arche_apb_ctrl_suspend(struct device *dev)
static int __maybe_unused arche_apb_ctrl_resume(struct device *dev)
{
/*
- * Atleast for ES2 we have to meet the delay requirement between
+ * At least for ES2 we have to meet the delay requirement between
* unipro switch and AP bridge init, depending on whether bridge is in
* OFF state or standby state.
*
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e374dfc0c92f..fcbd5f71eff2 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -591,7 +591,7 @@ static __maybe_unused int arche_platform_suspend(struct device *dev)
static __maybe_unused int arche_platform_resume(struct device *dev)
{
/*
- * Atleast for ES2 we have to meet the delay requirement between
+ * At least for ES2 we have to meet the delay requirement between
* unipro switch and AP bridge init, depending on whether bridge is in
* OFF state or standby state.
*
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index db0b600ee5d1..0ad8aeabccbf 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -497,7 +497,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int ret;
- struct gbaudio_module_info *module;
+ struct gbaudio_module_info *module = NULL, *iter;
struct gbaudio_data_connection *data;
struct gb_bundle *bundle;
struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
@@ -511,11 +511,13 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
return -ENODEV;
}
- list_for_each_entry(module, &codec->module_list, list) {
+ list_for_each_entry(iter, &codec->module_list, list) {
/* find the dai */
- data = find_data(module, dai->id);
- if (data)
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
break;
+ }
}
if (!data) {
dev_err(dai->dev, "DATA connection missing\n");
@@ -563,7 +565,7 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
{
int ret;
struct gbaudio_data_connection *data;
- struct gbaudio_module_info *module;
+ struct gbaudio_module_info *module = NULL, *iter;
struct gb_bundle *bundle;
struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
struct gbaudio_stream_params *params;
@@ -592,15 +594,17 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
return ret;
}
- list_for_each_entry(module, &codec->module_list, list) {
+ list_for_each_entry(iter, &codec->module_list, list) {
/* find the dai */
- data = find_data(module, dai->id);
- if (data)
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
break;
+ }
}
if (!data) {
- dev_err(dai->dev, "%s:%s DATA connection missing\n",
- dai->name, module->name);
+ dev_err(dai->dev, "%s DATA connection missing\n",
+ dai->name);
mutex_unlock(&codec->lock);
return -ENODEV;
}
@@ -1027,12 +1031,6 @@ static int gbcodec_probe(struct snd_soc_component *comp)
return 0;
}
-static void gbcodec_remove(struct snd_soc_component *comp)
-{
- /* Empty function for now */
- return;
-}
-
static int gbcodec_write(struct snd_soc_component *comp, unsigned int reg,
unsigned int value)
{
@@ -1047,8 +1045,6 @@ static unsigned int gbcodec_read(struct snd_soc_component *comp,
static const struct snd_soc_component_driver soc_codec_dev_gbaudio = {
.probe = gbcodec_probe,
- .remove = gbcodec_remove,
-
.read = gbcodec_read,
.write = gbcodec_write,
};
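The module/iter split in the two hunks above follows the tree-wide rule of not using the list_for_each_entry() cursor after the loop: the cursor never becomes NULL when nothing matches, so a result pointer is assigned only on a confirmed hit. The shape of the pattern, with placeholder names:

struct item *found = NULL, *iter;	/* placeholder type and names */

list_for_each_entry(iter, &item_list, list) {
	if (item_matches(iter, key)) {
		found = iter;		/* assign only on a confirmed match */
		break;
	}
}

if (!found)
	return -ENODEV;	/* the cursor itself is never NULL after the loop */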
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index ad20ec24031e..3fda172239d2 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -297,7 +297,6 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
pwm->dev = &gbphy_dev->dev;
pwm->ops = &gb_pwm_ops;
- pwm->base = -1; /* Allocate base dynamically */
pwm->npwm = pwmc->pwm_max + 1;
ret = pwmchip_add(pwm);
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index 867bf289df2e..4c42e393cd3d 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -533,7 +533,7 @@ static int log_results(struct loopback_test *t)
fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, 0644);
if (fd < 0) {
- fprintf(stderr, "unable to open %s for appendation\n", file_name);
+ fprintf(stderr, "unable to open %s for appending\n", file_name);
abort();
}
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 71c709771676..52b8957c19c9 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -290,7 +290,7 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
int ret, timeout = 10;
bool doit;
- ret = strtobool(buf, &doit);
+ ret = kstrtobool(buf, &doit);
if (ret < 0)
return ret;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 793918e1c45f..f177b20f0f2d 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -749,7 +749,6 @@ static int ad5933_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ad5933_ring_setup_ops);
if (ret)
return ret;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 74adb82f37c3..c0b2716d0511 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -499,7 +499,6 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
case IIO_ANGL_VEL:
- negative = st->rx[0] & 0x80;
vel = be16_to_cpup((__be16 *)st->rx);
vel >>= 16 - st->resolution;
if (vel & 0x8000) {
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 1c63d595313d..9429ee155910 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -84,10 +84,6 @@ static void ks_wlan_hw_wakeup_task(struct work_struct *work)
return;
}
}
-
- /* power save */
- if (atomic_read(&priv->sme_task.count) > 0)
- tasklet_enable(&priv->sme_task);
}
static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
@@ -2200,10 +2196,11 @@ static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
}
}
-static
-void hostif_sme_task(struct tasklet_struct *t)
+static void hostif_sme_work(struct work_struct *work)
{
- struct ks_wlan_private *priv = from_tasklet(priv, t, sme_task);
+ struct ks_wlan_private *priv;
+
+ priv = container_of(work, struct ks_wlan_private, sme_work);
if (priv->dev_state < DEVICE_STATE_BOOT)
return;
@@ -2214,7 +2211,7 @@ void hostif_sme_task(struct tasklet_struct *t)
hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
inc_smeqhead(priv);
if (cnt_smeqbody(priv) > 0)
- tasklet_schedule(&priv->sme_task);
+ schedule_work(&priv->sme_work);
}
/* send to Station Management Entity module */
@@ -2229,7 +2226,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
netdev_err(priv->net_dev, "sme queue buffer overflow\n");
}
- tasklet_schedule(&priv->sme_task);
+ schedule_work(&priv->sme_work);
}
static inline void hostif_aplist_init(struct ks_wlan_private *priv)
@@ -2254,7 +2251,7 @@ static inline void hostif_sme_init(struct ks_wlan_private *priv)
priv->sme_i.qtail = 0;
spin_lock_init(&priv->sme_i.sme_spin);
priv->sme_i.sme_flag = 0;
- tasklet_setup(&priv->sme_task, hostif_sme_task);
+ INIT_WORK(&priv->sme_work, hostif_sme_work);
}
static inline void hostif_wpa_init(struct ks_wlan_private *priv)
@@ -2312,5 +2309,5 @@ int hostif_init(struct ks_wlan_private *priv)
void hostif_exit(struct ks_wlan_private *priv)
{
- tasklet_kill(&priv->sme_task);
+ cancel_work_sync(&priv->sme_work);
}
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index 7aaf8d780939..3e9a91b5131c 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -449,7 +449,7 @@ struct ks_wlan_private {
struct sme_info sme_i;
u8 *rxp;
unsigned int rx_size;
- struct tasklet_struct sme_task;
+ struct work_struct sme_work;
struct work_struct wakeup_work;
int scan_ind_count;
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 29f8ce2a47f5..97dff82b7a5f 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -45,9 +45,6 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
static DEFINE_SPINLOCK(dim_lock);
-static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
-
/**
* struct hdm_channel - private structure to keep channel specific data
* @name: channel name
@@ -361,15 +358,9 @@ static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
return IRQ_HANDLED;
}
-/**
- * dim2_tasklet_fn - tasklet function
- * @data: private data
- *
- * Service each initialized channel, if needed
- */
-static void dim2_tasklet_fn(unsigned long data)
+static irqreturn_t dim2_task_irq(int irq, void *_dev)
{
- struct dim2_hdm *dev = (struct dim2_hdm *)data;
+ struct dim2_hdm *dev = _dev;
unsigned long flags;
int ch_idx;
@@ -385,6 +376,8 @@ static void dim2_tasklet_fn(unsigned long data)
while (!try_start_dim_transfer(dev->hch + ch_idx))
continue;
}
+
+ return IRQ_HANDLED;
}
/**
@@ -392,8 +385,8 @@ static void dim2_tasklet_fn(unsigned long data)
* @irq: irq number
* @_dev: private data
*
- * Acknowledge the interrupt and schedule a tasklet to service channels.
- * Return IRQ_HANDLED.
+ * Acknowledge the interrupt and service each initialized channel,
+ * if needed, in task context.
*/
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
@@ -405,9 +398,7 @@ static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
dim_service_ahb_int_irq(get_active_channels(dev, buffer));
spin_unlock_irqrestore(&dim_lock, flags);
- dim2_tasklet.data = (unsigned long)dev;
- tasklet_schedule(&dim2_tasklet);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
/**
@@ -654,14 +645,12 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
if (!hdm_ch->is_initialized)
return -EPERM;
- tasklet_disable(&dim2_tasklet);
spin_lock_irqsave(&dim_lock, flags);
hal_ret = dim_destroy_channel(&hdm_ch->ch);
hdm_ch->is_initialized = false;
if (ch_idx == dev->atx_idx)
dev->atx_idx = -1;
spin_unlock_irqrestore(&dim_lock, flags);
- tasklet_enable(&dim2_tasklet);
if (hal_ret != DIM_NO_ERROR) {
pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
ret = -EFAULT;
@@ -821,8 +810,8 @@ static int dim2_probe(struct platform_device *pdev)
goto err_shutdown_dim;
}
- ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
- "dim2_ahb0_int", dev);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
+ dim2_task_irq, 0, "dim2_ahb0_int", dev);
if (ret) {
dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
goto err_shutdown_dim;
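The dim2 conversion replaces the tasklet with the hard/threaded IRQ split: the hard handler acknowledges the hardware and returns IRQ_WAKE_THREAD, and the former tasklet body runs as the thread handler. The generic shape of that pattern, with placeholder names:

static irqreturn_t hw_isr(int irq, void *dev_id)
{
	/* quick acknowledge in hard-IRQ context, heavy work deferred */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t hw_isr_thread(int irq, void *dev_id)
{
	/* runs in a dedicated kernel thread with interrupts enabled */
	return IRQ_HANDLED;
}

/* in probe(): both handlers registered for the same interrupt line */
ret = devm_request_threaded_irq(&pdev->dev, irq, hw_isr, hw_isr_thread,
				0, "hw-int", dev);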
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 55e0ad759250..d0dd659834ee 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -2072,6 +2072,7 @@ struct qlge_adapter *netdev_to_qdev(struct net_device *ndev)
return ndev_priv->qdev;
}
+
/*
* The main Adapter structure definition.
* This structure has all fields relevant to the hardware.
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index 2ff78ed1faab..ac6effbecf6d 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -188,7 +188,6 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
}
}
-
}
spin_unlock_bh(&pstapriv->auth_list_lock);
@@ -381,7 +380,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
/* set ra_id, init_rate */
psta->raid = raid;
psta->init_rate = init_rate;
-
}
}
@@ -455,7 +453,6 @@ void update_bmc_sta(struct adapter *padapter)
spin_lock_bh(&psta->lock);
psta->state = _FW_LINKED;
spin_unlock_bh(&psta->lock);
-
}
}
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index f056204c0fdb..bca20fe5c983 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -53,7 +53,8 @@ static unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned shor
unsigned char *cur_ptr, *start_ptr;
unsigned short tagLen, tagType;
- start_ptr = cur_ptr = (unsigned char *)ph->tag;
+ start_ptr = (unsigned char *)ph->tag;
+ cur_ptr = (unsigned char *)ph->tag;
while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
/* prevent un-alignment access */
tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
@@ -87,19 +88,19 @@ static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
int tail_len;
unsigned long end, tail;
- if ((src+len) > skb_tail_pointer(skb) || skb->len < len)
+ if ((src + len) > skb_tail_pointer(skb) || skb->len < len)
return -1;
tail = (unsigned long)skb_tail_pointer(skb);
- end = (unsigned long)src+len;
+ end = (unsigned long)src + len;
if (tail < end)
return -1;
- tail_len = (int)(tail-end);
+ tail_len = (int)(tail - end);
if (tail_len > 0)
- memmove(src, src+len, tail_len);
+ memmove(src, src + len, tail_len);
- skb_trim(skb, skb->len-len);
+ skb_trim(skb, skb->len - len);
return 0;
}
@@ -117,7 +118,7 @@ static void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_IPV4;
- memcpy(networkAddr+7, (unsigned char *)ipAddr, 4);
+ memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4);
}
static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
@@ -126,8 +127,8 @@ static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_PPPOE;
- memcpy(networkAddr+1, (unsigned char *)sid, 2);
- memcpy(networkAddr+3, (unsigned char *)ac_mac, 6);
+ memcpy(networkAddr + 1, (unsigned char *)sid, 2);
+ memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6);
}
static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
@@ -136,17 +137,17 @@ static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_IPV6;
- memcpy(networkAddr+1, (unsigned char *)ipAddr, 16);
+ memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16);
}
static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
{
while (len > 0) {
- if (*data == tag && *(data+1) == len8b && len >= len8b*8)
- return data+2;
+ if (*data == tag && *(data + 1) == len8b && len >= len8b * 8)
+ return data + 2;
- len -= (*(data+1))*8;
- data += (*(data+1))*8;
+ len -= (*(data + 1)) * 8;
+ data += (*(data + 1)) * 8;
}
return NULL;
}
@@ -158,7 +159,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
if (len >= 8) {
- mac = scan_tlv(&data[8], len-8, 1, 1);
+ mac = scan_tlv(&data[8], len - 8, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -166,7 +167,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
if (len >= 16) {
- mac = scan_tlv(&data[16], len-16, 1, 1);
+ mac = scan_tlv(&data[16], len - 16, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -174,7 +175,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
if (len >= 24) {
- mac = scan_tlv(&data[24], len-24, 1, 1);
+ mac = scan_tlv(&data[24], len - 24, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -182,7 +183,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
if (len >= 24) {
- mac = scan_tlv(&data[24], len-24, 2, 1);
+ mac = scan_tlv(&data[24], len - 24, 2, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -190,7 +191,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
if (len >= 40) {
- mac = scan_tlv(&data[40], len-40, 2, 1);
+ mac = scan_tlv(&data[40], len - 40, 2, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -313,6 +314,7 @@ void nat25_db_cleanup(struct adapter *priv)
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
+
f = priv->nethash[i];
while (f) {
struct nat25_network_db_entry *g;
@@ -339,12 +341,12 @@ void nat25_db_expire(struct adapter *priv)
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
- f = priv->nethash[i];
+ f = priv->nethash[i];
while (f) {
struct nat25_network_db_entry *g;
- g = f->next_hash;
+ g = f->next_hash;
if (__nat25_has_expired(f)) {
if (atomic_dec_and_test(&f->use_count)) {
if (priv->scdb_entry == f) {
@@ -396,7 +398,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
tmp = be32_to_cpu(iph->saddr);
__nat25_generate_ipv4_network_addr(networkAddr, &tmp);
/* record source IP address and , source mac address into db */
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
return 0;
default:
return -1;
@@ -421,7 +423,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
arp_ptr += arp->ar_hln;
sender = (unsigned int *)arp_ptr;
__nat25_generate_ipv4_network_addr(networkAddr, sender);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
return 0;
default:
return -1;
@@ -432,7 +434,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
/* Handle PPPoE frame */
/*---------------------------------------------------*/
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- unsigned short *pMagic;
+ __be16 *pMagic;
switch (method) {
case NAT25_CHECK:
@@ -458,22 +460,22 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
sizeof(tag_buf))
return -1;
- memcpy(tag->tag_data+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN,
+ memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN,
pOldTag->tag_data, old_tag_len);
- if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN+old_tag_len) < 0)
+ if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0)
return -1;
- ph->length = htons(ntohs(ph->length)-TAG_HDR_LEN-old_tag_len);
+ ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len);
}
tag->tag_type = PTT_RELAY_SID;
- tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
+ tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len);
/* insert the magic_code+client mac in relay tag */
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
*pMagic = htons(MAGIC_CODE);
- memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN);
/* Add relay tag */
if (__nat25_add_pppoe_tag(skb, tag) < 0)
@@ -486,7 +488,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
return -2;
if (priv->pppoe_connection_in_progress == 0)
- memcpy(priv->pppoe_addr, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(priv->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN);
priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
}
@@ -496,11 +498,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
} else { /* session phase */
__nat25_generate_pppoe_network_addr(networkAddr, skb->data, &ph->sid);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
if (!priv->ethBrExtInfo.addPPPoETag &&
priv->pppoe_connection_in_progress &&
- !memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
+ !memcmp(skb->data + ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
priv->pppoe_connection_in_progress = 0;
}
return 0;
@@ -548,7 +550,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
case NAT25_INSERT:
if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
__nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
if (iph->nexthdr == IPPROTO_ICMPV6 &&
skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
@@ -557,9 +559,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
hdr->icmp6_cksum = 0;
hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
- iph->payload_len,
+ be16_to_cpu(iph->payload_len),
IPPROTO_ICMPV6,
- csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ csum_partial((__u8 *)hdr,
+ be16_to_cpu(iph->payload_len),
+ 0));
}
}
}
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 6eca30124ee8..06523d91939a 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -11,14 +11,54 @@
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
-/*
-Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+/* Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
-static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+static void c2h_wk_callback(struct work_struct *work);
+
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
- int res = _SUCCESS;
+ cancel_work_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ msleep(10);
+
+ while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+ void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+ if (c2h && c2h != (void *)pevtpriv)
+ kfree(c2h);
+ }
+}
+
+/* Calling Context:
+ *
+ * rtw_enqueue_cmd can only be called from kernel-thread context,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ */
+
+static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+ unsigned long flags;
+
+ if (!obj)
+ goto exit;
+
+ spin_lock_irqsave(&queue->lock, flags);
+
+ list_add_tail(&obj->list, &queue->queue);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+exit:
+
+ return _SUCCESS;
+}
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ u32 res = _SUCCESS;
init_completion(&pcmdpriv->enqueue_cmd);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
@@ -57,11 +97,9 @@ exit:
return res;
}
-static void c2h_wk_callback(struct work_struct *work);
-
-static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
- int res = _SUCCESS;
+ u32 res = _SUCCESS;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
atomic_set(&pevtpriv->event_seq, 0);
@@ -69,24 +107,13 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
+ if (!pevtpriv->c2h_queue)
+ res = _FAIL;
return res;
}
-void rtw_free_evt_priv(struct evt_priv *pevtpriv)
-{
- cancel_work_sync(&pevtpriv->c2h_wk);
- while (pevtpriv->c2h_wk_alive)
- msleep(10);
-
- while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
- void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
- if (c2h && c2h != (void *)pevtpriv)
- kfree(c2h);
- }
-}
-
-static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
if (pcmdpriv) {
kfree(pcmdpriv->cmd_allocated_buf);
@@ -94,75 +121,6 @@ static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
}
}
-/*
-Calling Context:
-
-rtw_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
-
-static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
-{
- unsigned long flags;
-
- if (!obj)
- goto exit;
-
- spin_lock_irqsave(&queue->lock, flags);
-
- list_add_tail(&obj->list, &queue->queue);
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
-exit:
-
- return _SUCCESS;
-}
-
-static struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
-{
- struct cmd_obj *obj;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
- if (list_empty(&queue->queue)) {
- obj = NULL;
- } else {
- obj = container_of((&queue->queue)->next, struct cmd_obj, list);
- list_del_init(&obj->list);
- }
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return obj;
-}
-
-u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- u32 res;
-
- res = _rtw_init_cmd_priv(pcmdpriv);
-
- return res;
-}
-
-u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
-{
- int res;
-
- res = _rtw_init_evt_priv(pevtpriv);
-
- return res;
-}
-
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- _rtw_free_cmd_priv(pcmdpriv);
-}
-
static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
@@ -187,7 +145,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
cmd_obj->padapter = padapter;
res = rtw_cmd_filter(pcmdpriv, cmd_obj);
- if (_FAIL == res) {
+ if (res == _FAIL) {
rtw_free_cmd_obj(cmd_obj);
goto exit;
}
@@ -204,11 +162,21 @@ exit:
struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
- struct cmd_obj *cmd_obj;
+ struct cmd_obj *obj;
+ struct __queue *queue = &pcmdpriv->cmd_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ if (list_empty(&queue->queue)) {
+ obj = NULL;
+ } else {
+ obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+ list_del_init(&obj->list);
+ }
- cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
- return cmd_obj;
+ return obj;
}
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
@@ -258,12 +226,12 @@ _next:
if (!pcmd)
continue;
- if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+ if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
pcmd->res = H2C_DROPPED;
goto post_process;
}
- pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
+ pcmd->cmdsz = round_up(pcmd->cmdsz, 4);
memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
@@ -291,7 +259,7 @@ post_process:
rtw_free_cmd_obj(pcmd);
else
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
- pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */
+ pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */
} else {
rtw_free_cmd_obj(pcmd);
}
@@ -316,11 +284,10 @@ post_process:
return 0;
}
-/*
-rtw_sitesurvey_cmd(~)
- ### NOTE:#### (!!!!)
- MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+/* rtw_sitesurvey_cmd(~)
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
struct rtw_ieee80211_channel *ch, int ch_num)
{
@@ -330,19 +297,17 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
- }
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
- }
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c)
return _FAIL;
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
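The NOTE kept above is a locking contract rather than a hint; a hypothetical caller honoring it would look roughly like this (illustration only, not part of the patch; the GFP_ATOMIC allocations inside rtw_sitesurvey_cmd() are what make calling under the spinlock legal):

static u8 example_scan_locked(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	u8 res;

	spin_lock_bh(&pmlmepriv->lock);
	/* pmlmepriv->lock held across the call, as the NOTE demands */
	res = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
	spin_unlock_bh(&pmlmepriv->lock);

	return res;
}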
@@ -403,13 +368,13 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pbsetdataratepara = kzalloc(sizeof(struct setdatarate_parm), GFP_ATOMIC);
+ pbsetdataratepara = kzalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
if (!pbsetdataratepara) {
kfree(ph2c);
res = _FAIL;
@@ -442,7 +407,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -479,7 +444,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -516,15 +481,14 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
- if (psecnetwork->IELength - 12 < 255) {
+ if (psecnetwork->IELength - 12 < 255)
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength - 12);
- } else {
+ else
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], 255);
- }
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
@@ -550,9 +514,9 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
phtpriv->ht_option = false;
if (pregistrypriv->ht_enable) {
- /* Added by Albert 2010/06/23 */
- /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
- /* Especially for Realtek 8192u SoftAP. */
+ /* Added by Albert 2010/06/23 */
+ /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
+ /* Especially for Realtek 8192u SoftAP. */
if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
@@ -611,7 +575,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
res = rtw_enqueue_cmd(cmdpriv, cmdobj);
} else {
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+ if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS)
res = _FAIL;
kfree(param);
}
@@ -629,12 +593,12 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra n
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = false;
goto exit;
}
- psetop = kzalloc(sizeof(struct setopmode_parm), GFP_KERNEL);
+ psetop = kzalloc(sizeof(*psetop), GFP_KERNEL);
if (!psetop) {
kfree(ph2c);
@@ -664,20 +628,20 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
+ psetstakey_para = kzalloc(sizeof(*psetstakey_para), GFP_KERNEL);
if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
+ psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp), GFP_KERNEL);
if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
@@ -723,13 +687,13 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
if (!enqueue) {
clear_cam_entry(padapter, entry);
} else {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm),
+ psetstakey_para = kzalloc(sizeof(*psetstakey_para),
GFP_ATOMIC);
if (!psetstakey_para) {
kfree(ph2c);
@@ -737,7 +701,7 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
goto exit;
}
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp),
+ psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp),
GFP_ATOMIC);
if (!psetstakey_rsp) {
kfree(ph2c);
@@ -770,13 +734,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
struct addBaReq_parm *paddbareq_parm;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
+ paddbareq_parm = kzalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
@@ -803,13 +767,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -844,7 +808,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
}
/* prepare cmd parameter */
- setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param),
+ setChannelPlan_param = kzalloc(sizeof(*setChannelPlan_param),
GFP_KERNEL);
if (!setChannelPlan_param) {
res = _FAIL;
@@ -853,7 +817,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
setChannelPlan_param->channel_plan = chplan;
/* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmdobj = kzalloc(sizeof(*pcmdobj), GFP_KERNEL);
if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
@@ -983,12 +947,12 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
mstatus = 1;/* connect */
/* Reset LPS Setting */
padapter->pwrctrlpriv.LpsIdleCount = 0;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
break;
case LPS_CTRL_DISCONNECT:
mstatus = 0;/* disconnect */
LPS_Leave(padapter);
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
break;
case LPS_CTRL_SPECIAL_PACKET:
pwrpriv->DelayLPSLastTimeStamp = jiffies;
@@ -1012,16 +976,16 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
u8 res = _SUCCESS;
/* if (!pwrctrlpriv->bLeisurePs) */
- /* return res; */
+ /* return res; */
if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1047,7 +1011,10 @@ exit:
static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
{
- SetHwReg8188EU(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+ struct hal_data_8188e *haldata = &padapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ ODM_RA_Set_TxRPT_Time(odmpriv, min_time);
}
u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
@@ -1058,13 +1025,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1084,7 +1051,20 @@ exit:
static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
{
- SetHwReg8188EU(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+ struct hal_data_8188e *haldata = &padapter->haldata;
+
+ /* switch current antenna to optimum antenna */
+ if (haldata->CurAntenna != antenna) {
+ ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, antenna == 2 ? MAIN_ANT : AUX_ANT);
+ haldata->CurAntenna = antenna;
+ }
+}
+
+static bool rtw_antenna_diversity(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+
+ return haldata->AntDivCfg != 0;
}
u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
@@ -1092,21 +1072,19 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 support_ant_div;
u8 res = _SUCCESS;
- GetHalDefVar8188EUsb(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
- if (!support_ant_div)
+ if (!rtw_antenna_diversity(padapter))
return res;
if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_KERNEL);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1139,13 +1117,13 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1153,8 +1131,8 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
}
pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
- pdrvextra_cmd_parm->type_size = intCmdType; /* As the command tppe. */
- pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+ pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
@@ -1173,13 +1151,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ppscmd = kzalloc(sizeof(*ppscmd), GFP_ATOMIC);
if (!ppscmd) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ppscmd);
res = _FAIL;
@@ -1197,6 +1175,11 @@ exit:
return res;
}
+static bool rtw_is_hi_queue_empty(struct adapter *adapter)
+{
+ return (rtw_read32(adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0;
+}
+
static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
{
int cnt = 0;
@@ -1208,12 +1191,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
return;
if (psta_bmc->sleepq_len == 0) {
- u8 val = 0;
-
- /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
- /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
-
- GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ bool val = rtw_is_hi_queue_empty(padapter);
while (!val) {
msleep(100);
@@ -1223,7 +1201,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
if (cnt > 10)
break;
- GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ val = rtw_is_hi_queue_empty(padapter);
}
if (cnt <= 10) {
@@ -1244,13 +1222,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1275,13 +1253,13 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1380,8 +1358,8 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case P2P_PROTO_WK_CID:
- /* Commented by Albert 2011/07/01 */
- /* I used the type_size as the type command */
+ /* Commented by Albert 2011/07/01 */
+ /* I used the type_size as the type command */
p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case CHECK_HIQ_WK_CID:
@@ -1404,11 +1382,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
}
@@ -1416,6 +1391,7 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_free_cmd_obj(pcmd);
}
+
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1426,8 +1402,10 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
spin_unlock_bh(&pmlmepriv->lock);
return;
- } else /* clear bridge database */
- nat25_db_cleanup(padapter);
+ }
+
+ /* clear bridge database */
+ nat25_db_cleanup(padapter);
/* free cmd */
rtw_free_cmd_obj(pcmd);
@@ -1437,11 +1415,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, 1);
}
@@ -1474,7 +1449,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_indicate_connect(padapter);
} else {
- pwlan = _rtw_alloc_network(pmlmepriv);
+ pwlan = rtw_alloc_network(pmlmepriv);
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 625d186c3647..0451e5177644 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -4,51 +4,43 @@
#include <linux/firmware.h>
#include "../include/rtw_fw.h"
-#define MAX_REG_BOLCK_SIZE 196
+#define MAX_REG_BLOCK_SIZE 196
#define FW_8188E_START_ADDRESS 0x1000
#define MAX_PAGE_SIZE 4096
#define IS_FW_HEADER_EXIST(_fwhdr) \
- ((le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x92C0 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88C0 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x2300 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88E0)
-
-/* This structure must be careful with byte-ordering */
+ ((le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x92C0 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88C0 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x2300 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88E0)
struct rt_firmware_hdr {
- /* 8-byte alinment required */
- /* LONG WORD 0 ---- */
- __le16 Signature; /* 92C0: test chip; 92C,
- * 88C0: test chip; 88C1: MP A-cut;
- * 92C1: MP A-cut */
- u8 Category; /* AP/NIC and USB/PCI */
- u8 Function; /* Reserved for different FW function
- * indcation, for further use when
- * driver needs to download different
- * FW for different conditions */
- __le16 Version; /* FW Version */
- u8 Subversion; /* FW Subversion, default 0x00 */
- u16 Rsvd1;
-
- /* LONG WORD 1 ---- */
- u8 Month; /* Release time Month field */
- u8 Date; /* Release time Date field */
- u8 Hour; /* Release time Hour field */
- u8 Minute; /* Release time Minute field */
- __le16 RamCodeSize; /* The size of RAM code */
- u8 Foundry;
- u8 Rsvd2;
-
- /* LONG WORD 2 ---- */
- __le32 SvnIdx; /* The SVN entry index */
- u32 Rsvd3;
-
- /* LONG WORD 3 ---- */
- u32 Rsvd4;
- u32 Rsvd5;
+ __le16 signature; /* 92C0: test chip; 92C,
+ * 88C0: test chip; 88C1: MP A-cut;
+ * 92C1: MP A-cut */
+ u8 category; /* AP/NIC and USB/PCI */
+ u8 function; /* Reserved for different FW function
+ * indcation, for further use when
+ * driver needs to download different
+ * FW for different conditions */
+ __le16 version; /* FW Version */
+ u8 subversion; /* FW Subversion, default 0x00 */
+ u8 rsvd1;
+ u8 month; /* Release time Month field */
+ u8 date; /* Release time Date field */
+ u8 hour; /* Release time Hour field */
+ u8 minute; /* Release time Minute field */
+ __le16 ramcodesize; /* The size of RAM code */
+ u8 foundry;
+ u8 rsvd2;
+ __le32 svnidx; /* The SVN entry index */
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
+static_assert(sizeof(struct rt_firmware_hdr) == 32);
+
static void fw_download_enable(struct adapter *padapter, bool enable)
{
u8 tmp;
@@ -71,53 +63,55 @@ static void fw_download_enable(struct adapter *padapter, bool enable)
}
}
-static int block_write(struct adapter *padapter, void *buffer, u32 buffSize)
+static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
{
int ret = _SUCCESS;
- u32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI muse use 4-byte write to download FW */
- u32 blockSize_p2 = 8; /* Phase #2 : Use 8-byte, if Phase#1 use big size to write FW. */
- u32 blockSize_p3 = 1; /* Phase #3 : Use 1-byte, the remnant of FW image. */
- u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
- u32 remainSize_p1 = 0, remainSize_p2 = 0;
- u8 *bufferPtr = (u8 *)buffer;
- u32 i = 0, offset = 0;
-
- blockSize_p1 = MAX_REG_BOLCK_SIZE;
-
- /* 3 Phase #1 */
- blockCount_p1 = buffSize / blockSize_p1;
- remainSize_p1 = buffSize % blockSize_p1;
-
- for (i = 0; i < blockCount_p1; i++) {
- ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + i * blockSize_p1), blockSize_p1, (bufferPtr + i * blockSize_p1));
+ u32 blocks, block_size, remain;
+ u32 i, offset, addr;
+ u8 *data;
+
+ block_size = MAX_REG_BLOCK_SIZE;
+
+ blocks = size / block_size;
+ remain = size % block_size;
+
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + i * block_size;
+ data = buffer + i * block_size;
+
+ ret = rtw_writeN(padapter, addr, block_size, data);
if (ret == _FAIL)
goto exit;
}
- /* 3 Phase #2 */
- if (remainSize_p1) {
- offset = blockCount_p1 * blockSize_p1;
+ if (remain) {
+ offset = blocks * block_size;
+ block_size = 8;
- blockCount_p2 = remainSize_p1 / blockSize_p2;
- remainSize_p2 = remainSize_p1 % blockSize_p2;
+ blocks = remain / block_size;
+ remain = remain % block_size;
- for (i = 0; i < blockCount_p2; i++) {
- ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + offset + i * blockSize_p2), blockSize_p2, (bufferPtr + offset + i * blockSize_p2));
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + offset + i * block_size;
+ data = buffer + offset + i * block_size;
+ ret = rtw_writeN(padapter, addr, block_size, data);
if (ret == _FAIL)
goto exit;
}
}
- /* 3 Phase #3 */
- if (remainSize_p2) {
- offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+ if (remain) {
+ offset += blocks * block_size;
- blockCount_p3 = remainSize_p2 / blockSize_p3;
+ /* block size 1 */
+ blocks = remain;
- for (i = 0; i < blockCount_p3; i++) {
- ret = rtw_write8(padapter, (FW_8188E_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + offset + i;
+ data = buffer + offset + i;
+ ret = rtw_write8(padapter, addr, *data);
if (ret == _FAIL)
goto exit;
}
@@ -127,7 +121,7 @@ exit:
return ret;
}
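The rewritten loop preserves the original three write phases; a small standalone sketch of the arithmetic (illustration only) shows how a buffer is carved into 196-byte blocks, then 8-byte blocks, then single bytes:

static void example_split(u32 size)
{
	u32 big = size / MAX_REG_BLOCK_SIZE;	/* 196-byte writes */
	u32 rem = size % MAX_REG_BLOCK_SIZE;
	u32 mid = rem / 8;			/* 8-byte writes */
	u32 tail = rem % 8;			/* single-byte writes */

	/* e.g. size = 1000: big = 5, mid = 2, tail = 4 (5 * 196 + 2 * 8 + 4 = 1000) */
	pr_info("blocks: %u x 196, %u x 8, %u x 1\n", big, mid, tail);
}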
-static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size)
+static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size)
{
u8 value8;
u8 u8Page = (u8)(page & 0x07);
@@ -138,21 +132,20 @@ static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size
return block_write(padapter, buffer, size);
}
-static int write_fw(struct adapter *padapter, void *buffer, u32 size)
+static int write_fw(struct adapter *padapter, u8 *buffer, u32 size)
{
/* Since we need dynamic decide method of dwonload fw, so we call this function to get chip version. */
/* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
int ret = _SUCCESS;
u32 pageNums, remainSize;
u32 page, offset;
- u8 *bufferPtr = (u8 *)buffer;
pageNums = size / MAX_PAGE_SIZE;
remainSize = size % MAX_PAGE_SIZE;
for (page = 0; page < pageNums; page++) {
offset = page * MAX_PAGE_SIZE;
- ret = page_write(padapter, page, bufferPtr + offset, MAX_PAGE_SIZE);
+ ret = page_write(padapter, page, buffer + offset, MAX_PAGE_SIZE);
if (ret == _FAIL)
goto exit;
@@ -160,7 +153,7 @@ static int write_fw(struct adapter *padapter, void *buffer, u32 size)
if (remainSize) {
offset = pageNums * MAX_PAGE_SIZE;
page = pageNums;
- ret = page_write(padapter, page, bufferPtr + offset, remainSize);
+ ret = page_write(padapter, page, buffer + offset, remainSize);
if (ret == _FAIL)
goto exit;
@@ -247,14 +240,12 @@ int rtl8188e_firmware_download(struct adapter *padapter)
{
int ret = _SUCCESS;
u8 write_fw_retry = 0;
- u32 fwdl_start_time;
+ unsigned long fwdl_timeout;
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
struct device *device = dvobj_to_dev(dvobj);
struct rt_firmware_hdr *fwhdr = NULL;
- u16 fw_version, fw_subversion, fw_signature;
u8 *fw_data;
u32 fw_size;
- static int log_version;
if (!dvobj->firmware.data)
ret = load_firmware(&dvobj->firmware, device);
@@ -265,21 +256,15 @@ int rtl8188e_firmware_download(struct adapter *padapter)
fw_data = dvobj->firmware.data;
fw_size = dvobj->firmware.size;
- /* To Check Fw header. Added by tynli. 2009.12.04. */
fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data;
- fw_version = le16_to_cpu(fwhdr->Version);
- fw_subversion = fwhdr->Subversion;
- fw_signature = le16_to_cpu(fwhdr->Signature);
-
- if (!log_version++)
- pr_info("%sFirmware Version %d, SubVersion %d, Signature 0x%x\n",
- DRIVER_PREFIX, fw_version, fw_subversion, fw_signature);
-
if (IS_FW_HEADER_EXIST(fwhdr)) {
- /* Shift 32 bytes for FW header */
- fw_data = fw_data + 32;
- fw_size = fw_size - 32;
+ pr_info_once("R8188EU: Firmware Version %d, SubVersion %d, Signature 0x%x\n",
+ le16_to_cpu(fwhdr->version), fwhdr->subversion,
+ le16_to_cpu(fwhdr->signature));
+
+ fw_data = fw_data + sizeof(struct rt_firmware_hdr);
+ fw_size = fw_size - sizeof(struct rt_firmware_hdr);
}
/* Suggested by Filen. If 8051 is running in RAM code, driver should inform Fw to reset by itself, */
@@ -290,7 +275,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
}
fw_download_enable(padapter, true);
- fwdl_start_time = jiffies;
+ fwdl_timeout = jiffies + msecs_to_jiffies(500);
while (1) {
/* reset the FWDL chksum */
rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
@@ -298,7 +283,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
ret = write_fw(padapter, fw_data, fw_size);
if (ret == _SUCCESS ||
- (rtw_get_passing_time_ms(fwdl_start_time) > 500 && write_fw_retry++ >= 3))
+ (time_after(jiffies, fwdl_timeout) && write_fw_retry++ >= 3))
break;
}
fw_download_enable(padapter, false);
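The jiffies conversion above replaces the driver-private rtw_get_passing_time_ms(); the retry loop it feeds reduces to this pattern (editor's sketch, same stop condition: success, or the 500 ms deadline has passed and at least three retries were made):

static int example_download_loop(struct adapter *padapter, u8 *data, u32 size)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);
	u8 retry = 0;
	int ret;

	do {
		ret = write_fw(padapter, data, size);
	} while (ret != _SUCCESS && !(time_after(jiffies, deadline) && retry++ >= 3));

	return ret;
}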
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index 5a0e42ed4a47..385a9ed8eff7 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -97,16 +97,15 @@ bool rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
- if (channel > 14) {
+ if (channel > 14)
return WIRELESS_INVALID;
- } else { /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
- }
+ /* could be pure B, pure G, or B/G */
+ if (rtw_is_cckratesonly_included(rate))
+ return WIRELESS_11B;
+ else if (rtw_is_cckrates_included(rate))
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
}
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -160,11 +159,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -295,10 +293,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit_new = limit - (pbuf - pie) - 2 - len;
@@ -558,9 +555,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt + 1] + 2;
break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return wpsie_ptr;
}
@@ -604,9 +600,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
@@ -901,9 +896,8 @@ u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
if (p2p_ielen)
*p2p_ielen = in_ie[cnt + 1] + 2;
return p2p_ie_ptr;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return NULL;
}
@@ -948,9 +942,8 @@ u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
@@ -1058,7 +1051,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
- if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
@@ -1068,7 +1061,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
- if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 4b78e42d180d..7ba75f73e47e 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -44,7 +44,7 @@ u8 rtw_do_join(struct adapter *padapter)
pmlmepriv->to_roaming > 0) {
/* submit site_survey_cmd */
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (_SUCCESS != ret)
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
pmlmepriv->to_join = false;
@@ -91,7 +91,7 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (_SUCCESS != ret)
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
ret = _FAIL;
diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c
index e14e3746efdd..af8e84a41b85 100644
--- a/drivers/staging/r8188eu/core/rtw_iol.c
+++ b/drivers/staging/r8188eu/core/rtw_iol.c
@@ -57,10 +57,10 @@ int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len
bool rtw_IOL_applied(struct adapter *adapter)
{
- if (1 == adapter->registrypriv.fw_iol)
+ if (adapter->registrypriv.fw_iol == 1)
return true;
- if ((2 == adapter->registrypriv.fw_iol) &&
+ if ((adapter->registrypriv.fw_iol == 2) &&
(adapter_to_dvobj(adapter)->pusbdev->speed != USB_SPEED_HIGH))
return true;
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index ccd43accb7dc..2f3000428af7 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -110,7 +110,7 @@ static void blink_work(struct work_struct *work)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ } else {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -131,7 +131,7 @@ static void blink_work(struct work_struct *work)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ } else {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -278,7 +278,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
else
pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -304,7 +304,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
}
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -328,7 +328,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
else
pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ }
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 6f0bff186477..5a815642c3f6 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -16,7 +16,6 @@
#include "../include/usb_osintf.h"
#include "../include/rtl8188e_dm.h"
-extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
void rtw_set_roaming(struct adapter *adapter, u8 to_roaming)
@@ -31,60 +30,6 @@ u8 rtw_to_roaming(struct adapter *adapter)
return adapter->mlmepriv.to_roaming;
}
-int _rtw_init_mlme_priv(struct adapter *padapter)
-{
- int i;
- u8 *pbuf;
- struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int res = _SUCCESS;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
-
- pmlmepriv->nic_hdl = (u8 *)padapter;
-
- pmlmepriv->pscanned = NULL;
- pmlmepriv->fw_state = 0;
- pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
- pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
-
- spin_lock_init(&pmlmepriv->lock);
- rtw_init_queue(&pmlmepriv->free_bss_pool);
- rtw_init_queue(&pmlmepriv->scanned_queue);
-
- set_scanned_network_val(pmlmepriv, 0);
-
- memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
-
- pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
-
- if (!pbuf) {
- res = _FAIL;
- goto exit;
- }
- pmlmepriv->free_bss_buf = pbuf;
-
- pnetwork = (struct wlan_network *)pbuf;
-
- for (i = 0; i < MAX_BSS_CNT; i++) {
- INIT_LIST_HEAD(&pnetwork->list);
-
- list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
-
- pnetwork++;
- }
-
- /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
-
- rtw_clear_scan_deny(padapter);
-
- rtw_init_mlme_timer(padapter);
-
-exit:
-
- return res;
-}
-
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
kfree(*ppie);
@@ -95,7 +40,6 @@ static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
{
kfree(pmlmepriv->assoc_req);
- kfree(pmlmepriv->assoc_rsp);
rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
@@ -108,49 +52,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
}
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
-{
-
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-
- if (pmlmepriv) {
- vfree(pmlmepriv->free_bss_buf);
- }
-
-}
-
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
-{
- struct wlan_network *pnetwork;
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct list_head *plist = NULL;
-
- spin_lock_bh(&free_queue->lock);
-
- if (list_empty(&free_queue->queue)) {
- pnetwork = NULL;
- goto exit;
- }
- plist = (&free_queue->queue)->next;
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- list_del_init(&pnetwork->list);
-
- pnetwork->network_type = 0;
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
- pnetwork->aid = 0;
- pnetwork->join_res = 0;
-
- pmlmepriv->num_of_scanned++;
-
-exit:
- spin_unlock_bh(&free_queue->lock);
-
- return pnetwork;
-}
-
void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
{
u32 curr_time, delta_time;
@@ -194,7 +95,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
/*
return the wlan_network with the matching addr
- Shall be calle under atomic context... to avoid possible racing condition...
+ Shall be called under atomic context... to avoid possible racing condition...
*/
struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
@@ -291,23 +192,92 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
{
- int res;
+ int i;
+ u8 *pbuf;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int res = _SUCCESS;
- res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+
+ pmlmepriv->nic_hdl = (u8 *)padapter;
+
+ pmlmepriv->pscanned = NULL;
+ pmlmepriv->fw_state = 0;
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ spin_lock_init(&pmlmepriv->lock);
+ rtw_init_queue(&pmlmepriv->free_bss_pool);
+ rtw_init_queue(&pmlmepriv->scanned_queue);
+
+ set_scanned_network_val(pmlmepriv, 0);
+
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+ if (!pbuf) {
+ res = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->free_bss_buf = pbuf;
+
+ pnetwork = (struct wlan_network *)pbuf;
+
+ for (i = 0; i < MAX_BSS_CNT; i++) {
+ INIT_LIST_HEAD(&pnetwork->list);
+
+ list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
+
+ pnetwork++;
+ }
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ rtw_init_mlme_timer(padapter);
+
+exit:
return res;
}
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
- _rtw_free_mlme_priv(pmlmepriv);
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+ vfree(pmlmepriv->free_bss_buf);
}
-static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
{
struct wlan_network *pnetwork;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct list_head *plist = NULL;
+
+ spin_lock_bh(&free_queue->lock);
+
+ if (list_empty(&free_queue->queue)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ plist = (&free_queue->queue)->next;
+
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ list_del_init(&pnetwork->list);
- pnetwork = _rtw_alloc_network(pmlmepriv);
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ pmlmepriv->num_of_scanned++;
+
+exit:
+ spin_unlock_bh(&free_queue->lock);
return pnetwork;
}
@@ -330,7 +300,7 @@ void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
/*
return the wlan_network with the matching addr
- Shall be calle under atomic context... to avoid possible racing condition...
+ Shall be called under atomic context... to avoid possible racing condition...
*/
struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
@@ -465,6 +435,13 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
+u8 rtw_current_antenna(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+
+ return haldata->CurAntenna;
+}
+
/*
Caller must hold pmlmepriv->lock first.
*/
@@ -498,7 +475,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
pnetwork = oldest;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
+
memcpy(&pnetwork->network, target, get_wlan_bssid_ex_sz(target));
/* variable initialize */
pnetwork->fixed = false;
@@ -521,7 +499,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
bssid_ex_sz = get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
memcpy(&pnetwork->network, target, bssid_ex_sz);
pnetwork->last_scanned = jiffies;
@@ -567,8 +545,8 @@ static void rtw_add_network(struct adapter *adapter,
/* select the desired network based on the capability of the (i)bss. */
/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
+/* (2) network_type */
+/* (3) WMM */
/* (4) HT */
/* (5) others */
static bool rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
@@ -715,15 +693,12 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (_SUCCESS == s_ret) {
- _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
- } else if (s_ret == 2) { /* there is no need to wait for join */
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- rtw_indicate_connect(adapter);
+ if (s_ret == _SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
} else {
if (rtw_to_roaming(adapter) != 0) {
if (--pmlmepriv->to_roaming == 0 ||
- _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+ rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
rtw_set_roaming(adapter, 0);
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
@@ -748,14 +723,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
rtw_os_xmit_schedule(adapter);
}
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
static void free_scanqueue(struct mlme_priv *pmlmepriv)
{
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
@@ -911,9 +878,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
}
- /* Commented by Albert 2012/07/21 */
- /* When doing the WPS, the wps_ie_len won't equal to 0 */
- /* And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+ /* When doing WPS, the wps_ie_len won't be 0 */
+ /* And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
if (padapter->securitypriv.wps_ie_len != 0) {
psta->ieee8021x_blocked = true;
padapter->securitypriv.wps_ie_len = 0;
@@ -1071,8 +1037,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_indicate_connect(adapter);
}
+ spin_unlock_bh(&pmlmepriv->lock);
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
+ spin_lock_bh(&pmlmepriv->lock);
} else {
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
@@ -1105,6 +1073,11 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
}
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
+{
+ rtw_write8(adapter, REG_TX_RPT_CTRL + 1, macid + 1);
+}
+
static u8 search_max_mac_id(struct adapter *padapter)
{
u8 mac_id;
@@ -1141,7 +1114,8 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
return;
macid = search_max_mac_id(adapter);
- SetHwReg8188EU(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+ rtw_set_max_rpt_macid(adapter, macid);
+
/* MACID|OPMODE:1 connect */
media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
@@ -1299,7 +1273,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
}
/*
-* _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
void _rtw_join_timeout_handler (struct adapter *adapter)
@@ -1310,7 +1284,7 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
if (rtw_to_roaming(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1329,12 +1303,12 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
rtw_indicate_disconnect(adapter);
free_scanqueue(pmlmepriv);/* */
}
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
}
/*
-* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
void rtw_scan_timeout_handler (struct adapter *adapter)
@@ -1414,6 +1388,7 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
{
int updated = false;
struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
+ unsigned long scan_res_expire;
/* check bssid, if needed */
if (pmlmepriv->assoc_by_bssid) {
@@ -1431,8 +1406,9 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
if (!rtw_is_desired_network(adapter, competitor))
goto exit;
+ scan_res_expire = competitor->last_scanned + msecs_to_jiffies(RTW_SCAN_RESULT_EXPIRE);
if (rtw_to_roaming(adapter) > 0) {
- if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE ||
+ if (time_after(jiffies, scan_res_expire) ||
!is_same_ess(&competitor->network, &pmlmepriv->cur_network.network))
goto exit;
}
@@ -1461,7 +1437,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
- u8 supp_ant_div = false;
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
@@ -1488,12 +1463,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
rtw_free_assoc_resources(adapter, 0);
}
- GetHalDefVar8188EUsb(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &supp_ant_div);
- if (supp_ant_div) {
- u8 cur_ant;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant);
- }
-
ret = rtw_joinbss_cmd(adapter, candidate);
exit:
@@ -1509,13 +1478,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
int res = _SUCCESS;
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
- psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
+ psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_KERNEL);
if (!psetauthparm) {
kfree(pcmd);
res = _FAIL;
@@ -1628,38 +1597,22 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
}
/* */
-/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
-/* Added by Annie, 2006-05-07. */
-/* */
/* Search by BSSID, */
/* Return Value: */
-/* -1 :if there is no pre-auth key in the table */
-/* >= 0 :if there is pre-auth key, and return the entry id */
+/* -1 :if there is no pre-auth key in the table */
+/* >= 0 :if there is pre-auth key, and return the entry id */
/* */
/* */
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
- break;
- } else {
- i++;
- /* continue; */
- }
-
- } while (i < NUM_PMKID_CACHE);
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
- if (i == NUM_PMKID_CACHE) {
- i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
- return i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+ return i;
+ return -1;
}
/* */
@@ -1796,10 +1749,23 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
}
+static void rtw_set_threshold(struct adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ struct ht_priv *htpriv = &mlmepriv->htpriv;
+
+ if (htpriv->ht_option && adapter->registrypriv.wifi_spec != 1) {
+ /* validate usb rx aggregation, use init value. */
+ rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, USB_RXAGG_PAGE_COUNT);
+ } else {
+ /* invalidate usb rx aggregation */
+ rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, 1);
+ }
+}
+
/* the function is at passive_level */
void rtw_joinbss_reset(struct adapter *padapter)
{
- u8 threshold;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -1810,18 +1776,7 @@ void rtw_joinbss_reset(struct adapter *padapter)
phtpriv->ampdu_enable = false;/* reset to disabled */
- /* TH = 1 => means that invalidate usb rx aggregation */
- /* TH = 0 => means that validate usb rx aggregation, use init value. */
- if (phtpriv->ht_option) {
- if (padapter->registrypriv.wifi_spec == 1)
- threshold = 1;
- else
- threshold = 0;
- SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
- } else {
- threshold = 1;
- SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
- }
+ rtw_set_threshold(padapter);
}
/* the function is >= passive_level */
@@ -1984,7 +1939,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
- if (0 == issued) {
+ if (issued == 0) {
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
}
@@ -2011,19 +1966,19 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
else
pnetwork = &pmlmepriv->cur_network;
- if (0 < rtw_to_roaming(padapter)) {
+ if (rtw_to_roaming(padapter) > 0) {
memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
while (1) {
do_join_r = rtw_do_join(padapter);
- if (_SUCCESS == do_join_r) {
+ if (do_join_r == _SUCCESS) {
break;
} else {
pmlmepriv->to_roaming--;
- if (0 < pmlmepriv->to_roaming) {
+ if (pmlmepriv->to_roaming > 0) {
continue;
} else {
rtw_indicate_disconnect(padapter);
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 10d5f1222936..faf23fc950c5 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -14,39 +14,22 @@
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
-static struct mlme_handler mlme_sta_tbl[] = {
- {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
- {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
- {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
- {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
- {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
- {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
-
- /*----------------------------------------------------------
- below 2 are reserved
- -----------------------------------------------------------*/
- {0, "DoReserved", &DoReserved},
- {0, "DoReserved", &DoReserved},
- {WIFI_BEACON, "OnBeacon", &OnBeacon},
- {WIFI_ATIM, "OnATIM", &OnAtim},
- {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
- {WIFI_AUTH, "OnAuth", &OnAuthClient},
- {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
- {WIFI_ACTION, "OnAction", &OnAction},
-};
-
-static struct action_handler OnAction_tbl[] = {
- {RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
- {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction_qos},
- {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction_dls},
- {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
- {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
- {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
- {RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
- {RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
- {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved},
- {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction_wmm},
- {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &OnAction_p2p},
+/* response function for each management frame subtype, do not reorder */
+static mlme_handler mlme_sta_tbl[] = {
+ OnAssocReq,
+ OnAssocRsp,
+ OnAssocReq,
+ OnAssocRsp,
+ OnProbeReq,
+ OnProbeRsp,
+ NULL,
+ NULL,
+ OnBeacon,
+ NULL,
+ OnDisassoc,
+ OnAuthClient,
+ OnDeAuth,
+ OnAction,
};
static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
@@ -71,7 +54,6 @@ extern unsigned char REALTEK_96B_IE[];
/********************************************************
MCS rate definitions
*********************************************************/
-unsigned char MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/********************************************************
@@ -287,11 +269,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
continue;
}
- if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
continue;
- if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ if (((padapter->registrypriv.cbw40_enable & BIT(1)) == 0) &&
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (!reg) {
@@ -320,7 +302,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -330,14 +312,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -352,9 +334,8 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
return chanset_size;
}
-int init_mlme_ext_priv(struct adapter *padapter)
+void init_mlme_ext_priv(struct adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -376,8 +357,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->mlmeext_init = true;
pmlmeext->active_keep_alive_check = true;
-
- return res;
}
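
Editor's note: init_mlme_ext_priv() always returned _SUCCESS, so the return type becomes void and the unused res variable goes away. A hedged sketch of how a caller would adapt (the function and names below are illustrative; the real call site lives elsewhere in the driver):

        /* Hypothetical caller, for illustration only */
        static int example_init_sw(struct adapter *padapter)
        {
                /* previously: if (init_mlme_ext_priv(padapter) == _FAIL) return -ENOMEM; */
                init_mlme_ext_priv(padapter);   /* cannot fail any more */
                return 0;
        }
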
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -394,45 +373,29 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
-{
- u8 *pframe = precv_frame->rx_data;
-
- if (ptable->func) {
- /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
- return;
- ptable->func(padapter, precv_frame);
- }
-}
-
void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
{
int index;
- struct mlme_handler *ptable;
+ mlme_handler fct;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 *pframe = precv_frame->rx_data;
- struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
- if (GetFrameType(pframe) != IEEE80211_FTYPE_MGMT)
+ if (!ieee80211_is_mgmt(hdr->frame_control))
return;
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
+ if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !is_broadcast_ether_addr(hdr->addr1))
return;
- ptable = mlme_sta_tbl;
-
- index = GetFrameSubType(pframe) >> 4;
-
- if (index > 13)
+ index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
+ if (index >= ARRAY_SIZE(mlme_sta_tbl))
return;
- ptable += index;
+ fct = mlme_sta_tbl[index];
if (psta) {
- if (GetRetry(pframe)) {
+ if (ieee80211_has_retry(hdr->frame_control)) {
if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum)
/* drop the duplicate management frame */
return;
@@ -440,13 +403,15 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
- if (GetFrameSubType(pframe) == WIFI_AUTH) {
+ if (ieee80211_is_auth(hdr->frame_control)) {
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- ptable->func = &OnAuth;
+ fct = OnAuth;
else
- ptable->func = &OnAuthClient;
+ fct = OnAuthClient;
}
- _mgt_dispatcher(padapter, ptable, precv_frame);
+
+ if (fct)
+ fct(padapter, precv_frame);
}
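
Editor's note: the private _mgt_dispatcher() wrapper is gone; mgt_dispatcher() now filters on the receiver address itself, bounds-checks the subtype index against ARRAY_SIZE(mlme_sta_tbl), and calls the handler directly, with WIFI_AP_STATE deciding between OnAuth and OnAuthClient. An equivalent formulation of the RA filter using <linux/etherdevice.h> helpers, shown only to make the memcmp() form easier to read (struct adapter and myid() come from the driver's own headers):

        #include <linux/etherdevice.h>

        static bool frame_is_for_us(struct adapter *padapter,
                                    const struct ieee80211_hdr *hdr)
        {
                /* accept frames addressed to us or to the broadcast address */
                return ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv)) ||
                       is_broadcast_ether_addr(hdr->addr1);
        }
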
static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
@@ -482,7 +447,6 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
u8 is_valid_p2p_probereq = false;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 wifi_test_chk_rate = 1;
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
!rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
@@ -497,25 +461,18 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
/* Commented by Kurt 2012/10/16 */
/* IOT issue: Google Nexus7 use 1M rate to send p2p_probe_req after GO nego completed and Nexus7 is client */
- if (wifi_test_chk_rate == 1) {
- is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
- if (is_valid_p2p_probereq) {
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
- /* FIXME */
- report_survey_event(padapter, precv_frame);
- p2p_listen_state_process(padapter, get_sa(pframe));
-
- return _SUCCESS;
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
- goto _continue;
+ is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
+ if (is_valid_p2p_probereq) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* FIXME */
+ report_survey_event(padapter, precv_frame);
+ p2p_listen_state_process(padapter, get_sa(pframe));
+
+ return _SUCCESS;
}
}
}
-_continue:
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
return _SUCCESS;
@@ -622,7 +579,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
}
/* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct rtw_ieee80211_hdr_3addr), len - sizeof(struct rtw_ieee80211_hdr_3addr));
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
update_TSF(pmlmeext, pframe, len);
@@ -988,7 +945,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
status = _STATS_FAILURE_;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
/* check if the supported rate is ok */
@@ -1077,7 +1034,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
wpa_ie_len = 0;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
@@ -1272,7 +1229,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -1315,7 +1272,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
int res;
unsigned short status;
struct ndis_802_11_var_ie *pIE;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
@@ -1386,11 +1342,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
report_assoc_result:
- if (res > 0)
- rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
- else
- kfree(pmlmepriv->assoc_rsp);
-
report_join_res(padapter, res);
return _SUCCESS;
@@ -1448,7 +1399,7 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
(pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
ignore_received_deauth = 1;
- } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
+ } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
// TODO: 802.11r
ignore_received_deauth = 1;
}
@@ -1508,126 +1459,76 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
return _SUCCESS;
}
-unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int ret = _FAIL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
- u8 category;
- u8 action;
-
- psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
-
- if (!psta)
- goto exit;
-
- category = frame_body[0];
- if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
- goto exit;
-
- action = frame_body[1];
- switch (action) {
- case RTW_WLAN_ACTION_SPCT_MSR_REQ:
- case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
- case RTW_WLAN_ACTION_SPCT_TPC_REQ:
- case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
- break;
- case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
- break;
- default:
- break;
- }
-
-exit:
- return ret;
-}
-
-unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int OnAction_dls(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
{
- u8 *addr;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
struct sta_info *psta = NULL;
struct recv_reorder_ctrl *preorder_ctrl;
unsigned char *frame_body;
- unsigned char category, action;
- unsigned short tid, status;
+ unsigned short tid;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 *pframe = precv_frame->rx_data;
struct sta_priv *pstapriv = &padapter->stapriv;
/* check RA matches or not */
- if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
return _SUCCESS;
- addr = GetAddr2Ptr(pframe);
- psta = rtw_get_stainfo(pstapriv, addr);
+ psta = rtw_get_stainfo(pstapriv, mgmt->sa);
if (!psta)
return _SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
- category = frame_body[0];
- if (category == RTW_WLAN_CATEGORY_BACK) { /* representing Block Ack */
- if (!pmlmeinfo->HT_enable)
- return _SUCCESS;
- action = frame_body[1];
- switch (action) {
- case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
- memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
- process_addba_req(padapter, (u8 *)&pmlmeinfo->ADDBA_req, addr);
-
- if (pmlmeinfo->bAcceptAddbaReq)
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
- else
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
- break;
- case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
- status = get_unaligned_le16(&frame_body[3]);
- tid = ((frame_body[5] >> 2) & 0x7);
- if (status == 0) { /* successful */
- psta->htpriv.agg_enable_bitmap |= 1 << tid;
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- } else {
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- }
- break;
- case RTW_WLAN_ACTION_DELBA: /* DELBA */
- if ((frame_body[3] & BIT(3)) == 0) {
- psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
- tid = (frame_body[3] >> 4) & 0x0F;
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- }
- /* todo: how to notify the host while receiving DELETE BA */
- break;
- default:
- break;
+ if (!pmlmeinfo->HT_enable)
+ return _SUCCESS;
+ /* All union members start with an action code, it's ok to use addba_req. */
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_req.capab),
+ IEEE80211_ADDBA_PARAM_TID_MASK);
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq;
+
+ issue_action_BA(padapter, mgmt->sa, WLAN_ACTION_ADDBA_RESP,
+ pmlmeinfo->bAcceptAddbaReq ?
+ WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_resp.capab),
+ IEEE80211_ADDBA_PARAM_TID_MASK);
+ if (mgmt->u.action.u.addba_resp.status == 0) { /* successful */
+ psta->htpriv.agg_enable_bitmap |= BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ }
+ break;
+ case WLAN_ACTION_DELBA:
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+ IEEE80211_DELBA_PARAM_TID_MASK);
+ if (u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+ IEEE80211_DELBA_PARAM_INITIATOR_MASK) == WLAN_BACK_RECIPIENT) {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
}
+ /* todo: how to notify the host while receiving DELETE BA */
+ break;
+ default:
+ break;
}
+
return _SUCCESS;
}
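
Editor's note: OnAction_back() now parses block-ack action frames through struct ieee80211_mgmt instead of raw byte offsets: the TID comes out of the ADDBA "parameter set" and DELBA "params" fields with u16_get_bits() and the standard mac80211 masks, and the reorder control is primed directly when an ADDBA request arrives. Illustration of the field extraction, using only standard definitions (not driver code):

        #include <linux/bitfield.h>
        #include <linux/ieee80211.h>

        /* TID is bits 2..5 of the ADDBA parameter set */
        static u8 addba_req_tid(const struct ieee80211_mgmt *mgmt)
        {
                u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

                return u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
        }

        /* DELBA carries the TID in bits 12..15 and the initiator flag in bit 11 */
        static bool delba_from_recipient(const struct ieee80211_mgmt *mgmt)
        {
                u16 params = le16_to_cpu(mgmt->u.action.u.delba.params);

                return u16_get_bits(params, IEEE80211_DELBA_PARAM_INITIATOR_MASK) ==
                       WLAN_BACK_RECIPIENT;
        }
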
@@ -1645,7 +1546,7 @@ static int get_reg_classes_full_count(struct p2p_channels *channel_list)
void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_REQ;
@@ -1655,7 +1556,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1672,9 +1573,9 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -1685,8 +1586,8 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
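
Editor's note: from here on, the issue_p2p_*() and issue_*() transmit paths are converted mechanically: struct rtw_ieee80211_hdr becomes the generic struct ieee80211_hdr, its frame_ctl member becomes frame_control, and the 3-address header size is taken from struct ieee80211_hdr_3addr. A hypothetical helper (not introduced by the patch; SetSeqNum and SetFrameSubType are existing driver macros) that captures the repeated header-building sequence:

        /* Hypothetical, for illustration only */
        static u8 *mgmt_hdr_init(struct ieee80211_hdr *hdr, const u8 *da,
                                 const u8 *sa, const u8 *bssid, u16 seq, u8 subtype)
        {
                hdr->frame_control = cpu_to_le16(0);
                memcpy(hdr->addr1, da, ETH_ALEN);
                memcpy(hdr->addr2, sa, ETH_ALEN);
                memcpy(hdr->addr3, bssid, ETH_ALEN);
                SetSeqNum(hdr, seq);                    /* driver macro: sequence field */
                SetFrameSubType((u8 *)hdr, subtype);    /* driver macro: subtype bits */

                /* the payload starts right after the 3-address header */
                return (u8 *)hdr + sizeof(struct ieee80211_hdr_3addr);
        }
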
@@ -1975,7 +1876,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_RESP;
@@ -1990,7 +1891,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2007,9 +1908,9 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2020,8 +1921,8 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2337,7 +2238,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_CONF;
@@ -2347,7 +2248,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2364,9 +2265,9 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2377,8 +2278,8 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2498,7 +2399,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_INVIT_REQ;
@@ -2509,7 +2410,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2526,9 +2427,9 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2539,8 +2440,8 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2745,7 +2646,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_INVIT_RESP;
@@ -2755,7 +2656,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2772,9 +2673,9 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2785,8 +2686,8 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2935,7 +2836,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
u8 dialogToken = 1;
u8 oui_subtype = P2P_PROVISION_DISC_REQ;
@@ -2946,7 +2847,7 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2963,9 +2864,9 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
@@ -2976,8 +2877,8 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -3045,7 +2946,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -3067,11 +2968,11 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -3083,7 +2984,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
pmlmeext->mgnt_seq++;
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -3291,7 +3192,7 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -3312,11 +3213,11 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (da) {
@@ -3339,8 +3240,8 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &pattrib->pktlen);
@@ -3614,7 +3515,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
u8 result = P2P_STATUS_SUCCESS;
u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[7];
@@ -3626,7 +3527,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
return _SUCCESS;
- len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ len -= sizeof(struct ieee80211_hdr_3addr);
switch (frame_body[6]) { /* OUI Subtype */
case P2P_GO_NEGO_REQ:
@@ -3668,7 +3569,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
pwdinfo->nego_req_info.benable = false;
result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
- if (P2P_STATUS_SUCCESS == result) {
+ if (result == P2P_STATUS_SUCCESS) {
if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3683,7 +3584,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
break;
case P2P_GO_NEGO_CONF:
result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
- if (P2P_STATUS_SUCCESS == result) {
+ if (result == P2P_STATUS_SUCCESS) {
if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3867,7 +3768,7 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
ret = on_action_public_p2p(precv_frame);
@@ -3880,7 +3781,7 @@ static unsigned int on_action_public_default(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 token;
token = frame_body[2];
@@ -3898,7 +3799,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 category, action;
/* check RA matches or not */
@@ -3906,7 +3807,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
goto exit;
category = frame_body[0];
- if (category != RTW_WLAN_CATEGORY_PUBLIC)
+ if (category != WLAN_CATEGORY_PUBLIC)
goto exit;
action = frame_body[1];
@@ -3923,16 +3824,6 @@ exit:
return ret;
}
-unsigned int OnAction_ht(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
{
u8 *frame_body;
@@ -3945,7 +3836,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
category = frame_body[0];
if (category != RTW_WLAN_CATEGORY_P2P)
@@ -3954,7 +3845,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
return _SUCCESS;
- len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ len -= sizeof(struct ieee80211_hdr_3addr);
OUI_Subtype = frame_body[5];
switch (OUI_Subtype) {
@@ -3975,29 +3866,22 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
{
- int i;
- unsigned char category;
- struct action_handler *ptable;
- unsigned char *frame_body;
- u8 *pframe = precv_frame->rx_data;
-
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- category = frame_body[0];
-
- for (i = 0; i < sizeof(OnAction_tbl) / sizeof(struct action_handler); i++) {
- ptable = &OnAction_tbl[i];
- if (category == ptable->num)
- ptable->func(padapter, precv_frame);
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_BACK:
+ OnAction_back(padapter, precv_frame);
+ break;
+ case WLAN_CATEGORY_PUBLIC:
+ on_action_public(padapter, precv_frame);
+ break;
+ case RTW_WLAN_CATEGORY_P2P:
+ OnAction_p2p(padapter, precv_frame);
+ break;
}
return _SUCCESS;
}
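
Editor's note: OnAction() no longer walks an OnAction_tbl of {category, name, handler} entries; it switches directly on mgmt->u.action.category, and the empty handlers (HT, WMM, QoS, DLS, spectrum management, ATIM, DoReserved) are dropped along with the table. The category byte sits immediately after the 3-address header, which is why the struct access matches the old frame_body[0]; a compile-time sanity sketch (not in the patch):

        #include <linux/build_bug.h>
        #include <linux/ieee80211.h>
        #include <linux/stddef.h>

        static inline void action_layout_check(void)
        {
                /* u.action.category is the first byte of the management payload */
                BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.action.category) !=
                             sizeof(struct ieee80211_hdr_3addr));
        }
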
-unsigned int DoReserved(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
{
struct xmit_frame *pmgntframe;
@@ -4154,7 +4038,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -4177,9 +4061,9 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
eth_broadcast_addr(pwlanhdr->addr1);
@@ -4190,8 +4074,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* pmlmeext->mgnt_seq++; */
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
/* for P2P : Primary Device Type & Device Name */
@@ -4274,8 +4158,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
u8 *wps_ie;
uint wps_ielen;
u8 sr = 0;
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct rtw_ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
- pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+ pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -4362,7 +4246,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -4386,12 +4270,12 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
bssid = cur_network->MacAddress;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -4401,7 +4285,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pmlmeext->mgnt_seq++;
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -4511,7 +4395,7 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
unsigned char bssrate[NumRates];
@@ -4531,11 +4415,11 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (da) {
@@ -4554,8 +4438,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (pssid)
pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &pattrib->pktlen);
@@ -4629,7 +4513,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int val32;
u16 val16;
@@ -4650,17 +4534,17 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_AUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (psta) {/* for AP mode */
memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
@@ -4734,7 +4618,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
SetPrivacy(fctrl);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->encrypt = _WEP40_;
@@ -4753,7 +4637,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
{
struct xmit_frame *pmgntframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct pkt_attrib *pattrib;
unsigned char *pbuf, *pframe;
unsigned short val;
@@ -4778,9 +4662,9 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
@@ -4794,7 +4678,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
else
return;
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen += pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -4884,7 +4768,7 @@ void issue_assocreq(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe, *p;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
__le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
@@ -4910,9 +4794,9 @@ void issue_assocreq(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -4922,8 +4806,8 @@ void issue_assocreq(struct adapter *padapter)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ASSOCREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* caps */
@@ -5184,7 +5068,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
@@ -5209,9 +5093,9 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5230,8 +5114,8 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -5286,7 +5170,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned short *qc;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5310,9 +5194,9 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5336,8 +5220,8 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pframe += sizeof(struct ieee80211_qos_hdr);
+ pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -5390,7 +5274,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5416,9 +5300,9 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -5429,8 +5313,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DEAUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
le_tmp = cpu_to_le16(reason);
pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &pattrib->pktlen);
@@ -5481,7 +5365,7 @@ exit:
void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
{
- u8 category = RTW_WLAN_CATEGORY_BACK;
+ u8 category = WLAN_CATEGORY_BACK;
u16 start_seq;
u16 BA_para_set;
u16 reason_code;
@@ -5491,7 +5375,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
u8 *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5511,9 +5395,9 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
/* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
@@ -5525,8 +5409,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &pattrib->pktlen);
@@ -5599,7 +5483,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct wlan_network *pnetwork = NULL;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5615,7 +5499,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmeinfo->bwmode_updated)
return;
- category = RTW_WLAN_CATEGORY_PUBLIC;
+ category = WLAN_CATEGORY_PUBLIC;
action = ACT_PUBLIC_BSSCOEXIST;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
@@ -5629,9 +5513,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
@@ -5642,8 +5526,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -5759,32 +5643,38 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
unsigned int send_beacon(struct adapter *padapter)
{
- u8 bxmitok = false;
+ bool bxmitok = false;
int issue = 0;
int poll = 0;
- u32 start = jiffies;
+ clear_beacon_valid_bit(padapter);
- SetHwReg8188EU(padapter, HW_VAR_BCN_VALID, NULL);
do {
issue_beacon(padapter, 100);
issue++;
do {
yield();
- GetHwReg8188EU(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ bxmitok = get_beacon_valid_bit(padapter);
poll++;
} while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return _FAIL;
- if (!bxmitok) {
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped || !bxmitok)
return _FAIL;
- } else {
- rtw_get_passing_time_ms(start);
- return _SUCCESS;
- }
+ return _SUCCESS;
+}
+
+bool get_beacon_valid_bit(struct adapter *adapter)
+{
+ /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
+ return BIT(0) & rtw_read8(adapter, REG_TDECTRL + 2);
+}
+
+void clear_beacon_valid_bit(struct adapter *adapter)
+{
+	/* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2; write 1 to clear (cleared by software) */
+ rtw_write8(adapter, REG_TDECTRL + 2, rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
}
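
Editor's note: send_beacon() now uses two small helpers instead of the HW_VAR_BCN_VALID accessors: get_beacon_valid_bit() reads BIT(0) of REG_TDECTRL+2 (i.e. BIT(16) of REG_TDECTRL), and clear_beacon_valid_bit() writes that bit back as write-1-to-clear before each transmission attempt. A condensed usage sketch of the clear-then-poll sequence (poll count and timeout are illustrative):

        static bool try_send_one_beacon(struct adapter *padapter)
        {
                int polls = 100;

                clear_beacon_valid_bit(padapter);       /* arm the latch */
                issue_beacon(padapter, 100);            /* queue one beacon, 100 ms timeout */

                while (polls--) {
                        if (get_beacon_valid_bit(padapter))
                                return true;            /* hardware reports the beacon went out */
                        yield();
                }
                return false;
        }
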
/****************************************************************************
@@ -5793,13 +5683,27 @@ Following are some utitity fuctions for WiFi MLME
*****************************************************************************/
+static void rtw_set_initial_gain(struct adapter *adapter, u8 gain)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+ struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+ if (gain == 0xff) {
+ /* restore rx gain */
+ ODM_Write_DIG(odmpriv, digtable->BackupIGValue);
+ } else {
+ digtable->BackupIGValue = digtable->CurIGValue;
+ ODM_Write_DIG(odmpriv, gain);
+ }
+}
+
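
Editor's note: rtw_set_initial_gain() replaces the HW_VAR_INITIAL_GAIN hook: any value other than 0xff backs up the current DIG value and programs the new gain, while 0xff restores the backup. A usage sketch bracketing a scan with reduced RX gain (0x1e mirrors the initialgain value used in the scan path below):

        /* Illustrative only */
        static void scan_gain_example(struct adapter *padapter)
        {
                rtw_set_initial_gain(padapter, 0x1e);   /* back up CurIGValue, lower RX gain */

                /* ... channel hopping / probe requests would run here ... */

                rtw_set_initial_gain(padapter, 0xff);   /* 0xff restores the backed-up gain */
        }
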
void site_survey(struct adapter *padapter)
{
unsigned char survey_channel = 0, val8;
enum rt_scan_type ScanType = SCAN_PASSIVE;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u32 initialgain = 0;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
@@ -5877,8 +5781,8 @@ void site_survey(struct adapter *padapter)
rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
- initialgain = 0xff; /* restore RX GAIN */
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* restore RX GAIN */
+ rtw_set_initial_gain(padapter, 0xff);
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
@@ -5911,8 +5815,8 @@ void site_survey(struct adapter *padapter)
/* config MSR */
Set_MSR(padapter, (pmlmeinfo->state & 0x3));
- initialgain = 0xff; /* restore RX GAIN */
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* restore RX GAIN */
+ rtw_set_initial_gain(padapter, 0xff);
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
@@ -5950,7 +5854,7 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
__le32 le32_tmp;
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
@@ -5980,13 +5884,13 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* get the signal strength */
bssid->Rssi = precv_frame->attrib.phy_info.recvpower; /* in dBM.raw data */
bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
- GetHalDefVar8188EUsb(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
+ bssid->PhyInfo.Optimum_antenna = rtw_current_antenna(padapter);
/* checking SSID */
p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
@@ -6087,10 +5991,58 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
return _SUCCESS;
}
+static void rtw_set_bssid(struct adapter *adapter, u8 *bssid)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(adapter, REG_BSSID + i, bssid[i]);
+}
+
+static void mlme_join(struct adapter *adapter, int type)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ u8 retry_limit = 0x30;
+
+ switch (type) {
+ case 0:
+ /* prepare to join */
+ /* enable to rx data frame, accept all data frame */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ rtw_write32(adapter, REG_RCR,
+ rtw_read32(adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
+ retry_limit = 48;
+ } else {
+ /* ad-hoc mode */
+ retry_limit = 0x7;
+ }
+ break;
+ case 1:
+ /* joinbss_event call back when join res < 0 */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+ break;
+ case 2:
+ /* sta add event call back */
+ /* enable update TSF */
+ rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) & (~BIT(4)));
+
+ if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
+ retry_limit = 0x7;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write16(adapter, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT);
+}
+
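
Editor's note: rtw_set_bssid() and mlme_join() absorb what previously went through HW_VAR_BSSID and HW_VAR_MLME_JOIN: the BSSID is written byte by byte to REG_BSSID, and the join "type" selects the RX filter, RCR bits, TSF-update and retry-limit programming for the prepare/fail/done stages. The bare 0/1/2 argument survives from the old SetHwReg interface; a hypothetical naming (not in the patch) makes the three call sites easier to read:

        /* Hypothetical enum, for illustration only; the driver passes 0/1/2 */
        enum rtw_join_stage {
                RTW_JOIN_PREPARE = 0,   /* open data RX filter, set retry limits */
                RTW_JOIN_FAILED  = 1,   /* joinbss callback with res < 0: close RX filter */
                RTW_JOIN_DONE    = 2,   /* station added: re-enable TSF update */
        };
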
void start_create_ibss(struct adapter *padapter)
{
unsigned short caps;
- u8 join_type;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
@@ -6121,9 +6073,8 @@ void start_create_ibss(struct adapter *padapter)
report_join_res(padapter, -1);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
} else {
- SetHwReg8188EU(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
- join_type = 0;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_set_bssid(padapter, padapter->registrypriv.dev_network.MacAddress);
+ mlme_join(padapter, 0);
report_join_res(padapter, 1);
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
@@ -6421,7 +6372,7 @@ void report_survey_event(struct adapter *padapter, struct recv_frame *precv_fram
pmlmeext = &padapter->mlmeextpriv;
pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
@@ -6471,7 +6422,7 @@ void report_surveydone_event(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
if (!pcmd_obj)
return;
@@ -6513,7 +6464,7 @@ void report_join_res(struct adapter *padapter, int res)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
@@ -6610,7 +6561,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
if (!pcmd_obj)
return;
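
Editor's note: the report_*() and timer handlers also switch their allocations to the kzalloc(sizeof(*ptr), ...) form, which checkpatch prefers because the size stays correct if the pointer's type ever changes. Minimal illustration (struct cmd_obj comes from the driver headers):

        #include <linux/slab.h>

        static struct cmd_obj *alloc_cmd_obj_example(gfp_t gfp)
        {
                /* sizeof(*obj) follows the pointee type automatically */
                struct cmd_obj *obj = kzalloc(sizeof(*obj), gfp);

                return obj;     /* NULL on allocation failure, exactly as before */
        }
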
@@ -6696,13 +6647,11 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 join_type;
u16 media_status;
if (join_res < 0) {
- join_type = 1;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_join(padapter, 1);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -6721,7 +6670,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
}
/* turn on dynamic functions */
- Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_RESET, NULL);
/* update IOT-releated issue */
update_IOT_info(padapter);
@@ -6750,13 +6699,13 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* set per sta rate after updating HT cap. */
set_sta_rate(padapter, psta);
- SetHwReg8188EU(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+ rtw_set_max_rpt_macid(padapter, psta->mac_id);
+
media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
- join_type = 2;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ mlme_join(padapter, 2);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
/* correcting TSF */
@@ -6769,7 +6718,6 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 join_type;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
@@ -6786,9 +6734,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
}
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
}
-
- join_type = 2;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ mlme_join(padapter, 2);
}
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -6800,14 +6746,27 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
update_sta_info(padapter, psta);
}
+static void mlme_disconnect(struct adapter *adapter)
+{
+ /* Set RCR to not to receive data frame when NO LINK state */
+ /* reject all data frames */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+
+ /* reset TSF */
+ rtw_write8(adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+
+ /* disable update TSF */
+ rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) | BIT(4));
+}
+
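
Editor's note: mlme_disconnect() folds the old HW_VAR_MLME_DISCONNECT handling into this file: it closes the data RX filter (REG_RXFLTMAP2 = 0), resets both TSF timers and sets BIT(4) of REG_BCN_CTRL so the TSF is no longer updated from received beacons. A hedged sketch that names that bit (the constant below is illustrative; the patch uses a bare BIT(4)):

        #define EXAMPLE_DIS_TSF_UPDATE  BIT(4)  /* hypothetical name for REG_BCN_CTRL bit 4 */

        static void tsf_update_enable(struct adapter *adapter, bool enable)
        {
                u8 val = rtw_read8(adapter, REG_BCN_CTRL);

                if (enable)
                        val &= ~EXAMPLE_DIS_TSF_UPDATE; /* sync TSF from beacons again */
                else
                        val |= EXAMPLE_DIS_TSF_UPDATE;  /* stop TSF updates while unlinked */
                rtw_write8(adapter, REG_BCN_CTRL, val);
        }
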
void mlmeext_sta_del_event_callback(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_disconnect(padapter);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -6951,7 +6910,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
if (pmlmeinfo->FW_sta_info[i].retry < 3) {
@@ -6996,11 +6955,11 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->scan_abort = false;/* reset */
}
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c)
goto exit_survey_timer_hdl;
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
@@ -7122,7 +7081,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7146,7 +7105,6 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
{
- u8 join_type;
struct ndis_802_11_var_ie *pIE;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -7170,7 +7128,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* set MSR to nolink -> infra. mode */
Set_MSR(padapter, _HW_STATE_STATION_);
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ mlme_disconnect(padapter);
}
rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
@@ -7243,9 +7201,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* config the initial gain under linking, need to write the BB registers */
- SetHwReg8188EU(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
- join_type = 0;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_set_bssid(padapter, pmlmeinfo->network.MacAddress);
+ mlme_join(padapter, 0);
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7266,8 +7223,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_disconnect(padapter);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7346,7 +7303,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
u8 bdelayscan = false;
u8 val8;
- u32 initialgain;
u32 i;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
@@ -7391,15 +7347,14 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
/* config the initial gain under scanning, need to write the BB registers */
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- initialgain = 0x1E;
+ rtw_set_initial_gain(padapter, 0x1e);
else
- initialgain = 0x28;
+ rtw_set_initial_gain(padapter, 0x28);
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
/* set MSR to no link state */
Set_MSR(padapter, _HW_STATE_NOLINK_);
@@ -7538,13 +7493,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- ptxBeacon_parm = kzalloc(sizeof(struct Tx_Beacon_param), GFP_ATOMIC);
+ ptxBeacon_parm = kzalloc(sizeof(*ptxBeacon_parm), GFP_ATOMIC);
if (!ptxBeacon_parm) {
kfree(ph2c);
res = _FAIL;
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index 48500fb82250..beffe5b16f1e 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -111,7 +111,7 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -132,9 +132,9 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -145,8 +145,8 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -166,12 +166,12 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_DEVDISC_RESP;
@@ -189,9 +189,9 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -202,8 +202,8 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P public action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -233,7 +233,7 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
{
struct adapter *padapter = pwdinfo->padapter;
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
u8 dialogToken = frame_body[7]; /* The Dialog Token of provisioning discovery request frame. */
__be32 p2poui = cpu_to_be32(P2POUI);
@@ -243,7 +243,7 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -259,9 +259,9 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -272,8 +272,8 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -311,7 +311,7 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -334,9 +334,9 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -347,8 +347,8 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -872,7 +872,7 @@ u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
}
psta->dev_name_len = 0;
- if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(__be16 *)pattr_content)) {
+ if (be16_to_cpu(*(__be16 *)pattr_content) == WPS_ATTR_DEVICE_NAME) {
dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content + 2));
psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
@@ -900,7 +900,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u8 *p2p_ie;
u32 p2p_ielen = 0;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[7];
status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
@@ -951,7 +951,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
/* issue Device Discoverability Response */
issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
- return (status == P2P_STATUS_SUCCESS) ? true : false;
+ return status == P2P_STATUS_SUCCESS;
}
u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
@@ -967,7 +967,7 @@ u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
u16 uconfig_method = 0;
__be16 be_tmp;
- frame_body = (pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (pframe + sizeof(struct ieee80211_hdr_3addr));
wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
if (wpsie) {
@@ -1213,7 +1213,7 @@ u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe
if (attr_content == P2P_STATUS_SUCCESS) {
/* Do nothing. */
} else {
- if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+ if (attr_content == P2P_STATUS_FAIL_INFO_UNAVAILABLE) {
rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
} else {
rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
@@ -1401,7 +1401,7 @@ u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u8 dialogToken = 0;
u8 status = P2P_STATUS_SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[6];
@@ -1602,7 +1602,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_DISABLE:
pwdinfo->p2p_ps_state = p2p_ps_state;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
pwdinfo->noa_index = 0;
pwdinfo->ctwindow = 0;
@@ -1612,7 +1612,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
if (pwrpriv->smart_ps == 0) {
pwrpriv->smart_ps = 2;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
}
}
break;
@@ -1623,10 +1623,10 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (pwdinfo->ctwindow > 0) {
if (pwrpriv->smart_ps != 0) {
pwrpriv->smart_ps = 0;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
}
}
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
}
break;
case P2P_PS_SCAN:
@@ -1634,7 +1634,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_ALLSTASLEEP:
if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
pwdinfo->p2p_ps_state = p2p_ps_state;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
}
break;
default:
@@ -1891,7 +1891,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
/* leave IPS/Autosuspend */
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = _FAIL;
goto exit;
}
@@ -1905,7 +1905,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
init_wifidirect_info(padapter, role);
} else if (role == P2P_ROLE_DISABLE) {
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = _FAIL;
goto exit;
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 7beabf82eb92..7b816b824947 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -59,7 +59,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->rf_pwrstate = rf_on;
}
- if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
for (keyid = 0; keyid < 4; keyid++) {
if (pmlmepriv->key_mask & BIT(keyid)) {
@@ -133,9 +133,8 @@ void rtw_ps_processor(struct adapter *padapter)
if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
- if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts % 4) == 0)) {
+ if (pwrpriv->rf_pwrstate == rf_on) {
pwrpriv->change_rfpwrstate = rf_off;
-
ips_enter(padapter);
}
exit:
@@ -177,6 +176,19 @@ static bool PS_RDY_CHECK(struct adapter *padapter)
return true;
}
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ /* Force leave RF low power mode for 1T1R to prevent
+ * conflicting setting in firmware power saving sequence.
+ */
+ if (mode != PS_MODE_ACTIVE)
+ ODM_RF_Saving(odmpriv, true);
+ rtl8188e_set_FwPwrMode_cmd(adapter, mode);
+}
+
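/*
 * Editor's sketch (illustrative, not part of the patch): callers that used to
 * go through SetHwReg8188EU(..., HW_VAR_H2C_FW_PWRMODE, ...) now call the new
 * helper directly.  A hypothetical LPS-entry path, assuming PS_MODE_MIN as
 * the target mode:
 */
static void demo_enter_lps(struct adapter *padapter)
{
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;

	pwrpriv->pwr_mode = PS_MODE_MIN;
	rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
	pwrpriv->bFwCurrentInPSMode = true;
}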
void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
@@ -186,7 +198,7 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
return;
if (pwrpriv->pwr_mode == ps_mode) {
- if (PS_MODE_ACTIVE == ps_mode)
+ if (ps_mode == PS_MODE_ACTIVE)
return;
if ((pwrpriv->smart_ps == smart_ps) &&
@@ -194,11 +206,10 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
return;
}
- /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
if (ps_mode == PS_MODE_ACTIVE) {
if (pwdinfo->opp_ps == 0) {
pwrpriv->pwr_mode = ps_mode;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ rtw_set_firmware_ps_mode(padapter, ps_mode);
pwrpriv->bFwCurrentInPSMode = false;
}
} else {
@@ -207,14 +218,28 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
pwrpriv->pwr_mode = ps_mode;
pwrpriv->smart_ps = smart_ps;
pwrpriv->bcn_ant_mode = bcn_ant_mode;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ rtw_set_firmware_ps_mode(padapter, ps_mode);
/* Set CTWindow after LPS */
if (pwdinfo->opp_ps == 1)
p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
}
}
+}
+static bool lps_rf_on(struct adapter *adapter)
+{
+ /* When we halt the NIC, we should check whether FW LPS has been left. */
+ if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do not check FW LPS leave, */
+ /* because the FW is unloaded. */
+ return true;
+ }
+
+ if (rtw_read32(adapter, REG_RCR) & 0x00070000)
+ return false;
+
+ return true;
}
/*
@@ -223,16 +248,13 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
* -1: Timeout
* -2: Other error
*/
-s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
{
- u32 start_time;
- u8 bAwake = false;
+ unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);
s32 err = 0;
- start_time = jiffies;
while (1) {
- GetHwReg8188EU(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
- if (bAwake)
+ if (lps_rf_on(padapter))
break;
if (padapter->bSurpriseRemoved) {
@@ -240,7 +262,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
break;
}
- if (rtw_get_passing_time_ms(start_time) > delay_ms) {
+ if (time_after(jiffies, timeout)) {
err = -1;
break;
}
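/*
 * Editor's sketch (illustrative, not part of the patch): LPS_RF_ON_check()
 * above polls with a jiffies deadline.  The same pattern in isolation,
 * assuming <linux/jiffies.h> and <linux/delay.h>; the helper name and the
 * 10 ms poll interval are assumptions:
 */
static int demo_poll_with_timeout(struct adapter *adapter,
				  bool (*cond)(struct adapter *), u32 delay_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);

	while (!cond(adapter)) {
		if (time_after(jiffies, timeout))
			return -1;	/* timed out */
		msleep(10);
	}

	return 0;
}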
@@ -329,13 +351,12 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
- pwrctrlpriv->pwr_state_check_cnts = 0;
pwrctrlpriv->bInSuspend = false;
pwrctrlpriv->bkeepfwalive = false;
pwrctrlpriv->LpsIdleCount = 0;
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -346,58 +367,38 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
timer_setup(&pwrctrlpriv->pwr_state_check_timer, pwr_state_check_handler, 0);
}
-/*
-* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
-* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
-* Return _SUCCESS or _FAIL
-*/
-
-int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+/* Wake the NIC up from: 1)IPS 2)USB autosuspend */
+int rtw_pwr_wakeup(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(3000);
+ unsigned long deny_time;
int ret = _SUCCESS;
- u32 start = jiffies;
-
- if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
-
- if (pwrpriv->ps_processing) {
- while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- msleep(10);
- }
- /* System suspend is not allowed to wakeup */
- if (pwrpriv->bInSuspend) {
- while (pwrpriv->bInSuspend &&
- (rtw_get_passing_time_ms(start) <= 3000 ||
- (rtw_get_passing_time_ms(start) <= 500)))
- msleep(10);
- }
+ while (pwrpriv->ps_processing && time_before(jiffies, timeout))
+ msleep(10);
/* I think this should be checked in IPS, LPS, autosuspend functions... */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
ret = _SUCCESS;
goto exit;
}
- if (rf_off == pwrpriv->rf_pwrstate) {
- if (_FAIL == ips_leave(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+
+ if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) {
+ ret = _FAIL;
+ goto exit;
}
- /* TODO: the following checking need to be merged... */
- if (padapter->bDriverStopped || !padapter->bup ||
- !padapter->hw_init_completed) {
- ret = false;
+ if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) {
+ ret = _FAIL;
goto exit;
}
exit:
- if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
+ deny_time = jiffies + msecs_to_jiffies(RTW_PWR_STATE_CHK_INTERVAL);
+ if (time_before(pwrpriv->ips_deny_time, deny_time))
+ pwrpriv->ips_deny_time = deny_time;
return ret;
}
@@ -408,12 +409,12 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
if (mode < PS_MODE_NUM) {
if (pwrctrlpriv->power_mgnt != mode) {
- if (PS_MODE_ACTIVE == mode)
+ if (mode == PS_MODE_ACTIVE)
LeaveAllPowerSaveMode(padapter);
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
}
} else {
ret = -EINVAL;
@@ -431,7 +432,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
return 0;
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
- if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
return -EFAULT;
} else {
return -EINVAL;
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index 8800ea4825ff..df518439aea2 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -71,7 +71,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
list_add_tail(&precvframe->list, &precvpriv->free_recv_queue.queue);
- precvframe->pkt_newalloc = NULL;
precvframe->pkt = NULL;
precvframe->len = 0;
@@ -81,8 +80,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
}
precvpriv->rx_pending_cnt = 1;
- sema_init(&precvpriv->allrxreturnevt, 0);
-
res = rtl8188eu_init_recv_priv(padapter);
timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
@@ -749,6 +746,7 @@ static int sta2ap_data_frame(struct adapter *adapter,
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = precv_frame->rx_data;
+ __le16 fc = *(__le16 *)ptr;
unsigned char *mybssid = get_bssid(pmlmepriv);
int ret = _SUCCESS;
@@ -769,9 +767,8 @@ static int sta2ap_data_frame(struct adapter *adapter,
process_pwrbit_data(adapter, precv_frame);
- if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ if (ieee80211_is_data_qos(fc))
process_wmmps_data(adapter, precv_frame);
- }
if (GetFrameSubType(ptr) & BIT(6)) {
/* No data, will not indicate to upper layer, temporarily count it here */
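/*
 * Editor's sketch (illustrative, not part of the patch): the ieee80211_is_*()
 * helpers from <linux/ieee80211.h> test the little-endian frame_control word
 * directly, so no manual subtype masking is needed.  Hypothetical example:
 */
static bool demo_is_qos_data(const u8 *rx_data)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)rx_data;

	return ieee80211_is_data_qos(hdr->frame_control);
}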
@@ -795,143 +792,135 @@ exit:
return ret;
}
-static int validate_recv_ctrl_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
+static void validate_recv_ctrl_frame(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- /* uint len = precv_frame->len; */
-
- if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
- return _FAIL;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)hdr;
+ u8 wmmps_ac;
+ struct sta_info *psta;
/* receive the frames whose RA (addr1) is my address */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
- return _FAIL;
+ if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN))
+ return;
/* only handle ps-poll */
- if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
- u16 aid;
- u8 wmmps_ac = 0;
- struct sta_info *psta = NULL;
+ if (!ieee80211_is_pspoll(hdr->frame_control))
+ return;
- aid = GetAid(pframe);
- psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ psta = rtw_get_stainfo(pstapriv, hdr->addr2);
+ if (!psta || psta->aid != (le16_to_cpu(pspoll->aid) & 0x3FFF))
+ return;
- if (!psta || psta->aid != aid)
- return _FAIL;
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
- /* for rx pkt statistics */
- psta->sta_stats.rx_ctrl_pkts++;
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(0);
+ break;
+ }
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
+ if (wmmps_ac)
+ return;
- if (wmmps_ac)
- return _FAIL;
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
+ if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ spin_lock_bh(&pxmitpriv->lock);
- spin_lock_bh(&pxmitpriv->lock);
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = xmitframe_phead->next;
- xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
+ if (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = xmitframe_plist->next;
- xmitframe_plist = xmitframe_plist->next;
+ list_del_init(&pxmitframe->list);
- list_del_init(&pxmitframe->list);
+ psta->sleepq_len--;
- psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
- if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.triggered = 1;
- pxmitframe->attrib.triggered = 1;
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ } else {
+ if (pstapriv->tim_bitmap & BIT(psta->aid)) {
+ if (psta->sleepq_len == 0)
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
+ else
+ psta->sleepq_len = 0;
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- } else {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- if (psta->sleepq_len == 0)
- /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
- issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
- else
- psta->sleepq_len = 0;
-
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
}
- spin_unlock_bh(&pxmitpriv->lock);
}
+ spin_unlock_bh(&pxmitpriv->lock);
}
-
- return _FAIL;
}
struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_frame *precv_frame);
-static int validate_recv_mgnt_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
+static void validate_recv_mgnt_frame(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct sta_info *psta;
+ struct ieee80211_hdr *hdr;
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (!precv_frame)
- return _SUCCESS;
+ return;
- /* for rx pkt statistics */
- psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->rx_data));
+ hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
- if (GetFrameSubType(precv_frame->rx_data) == WIFI_BEACON) {
+ if (ieee80211_is_beacon(hdr->frame_control))
psta->sta_stats.rx_beacon_pkts++;
- } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBEREQ) {
+ else if (ieee80211_is_probe_req(hdr->frame_control))
psta->sta_stats.rx_probereq_pkts++;
- } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBERSP) {
- if (!memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->rx_data), ETH_ALEN))
+ else if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ if (!memcmp(padapter->eeprompriv.mac_addr, hdr->addr1, ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)) ||
- is_multicast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)))
+ else if (is_broadcast_mac_addr(hdr->addr1) || is_multicast_mac_addr(hdr->addr1))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
@@ -939,72 +928,44 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
mgt_dispatcher(padapter, precv_frame);
-
- return _SUCCESS;
}
static int validate_recv_data_frame(struct adapter *adapter,
struct recv_frame *precv_frame)
{
- u8 bretry;
- u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
u8 *ptr = precv_frame->rx_data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
- int ret = _SUCCESS;
-
- bretry = GetRetry(ptr);
- pda = get_da(ptr);
- psa = get_sa(ptr);
- pbssid = get_hdr_bssid(ptr);
+ int ret;
- if (!pbssid) {
- ret = _FAIL;
- goto exit;
- }
+ memcpy(pattrib->dst, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(pattrib->src, ieee80211_get_SA(hdr), ETH_ALEN);
- memcpy(pattrib->dst, pda, ETH_ALEN);
- memcpy(pattrib->src, psa, ETH_ALEN);
+ /* address4 is used only if both to_ds and from_ds are set */
+ if (ieee80211_has_a4(hdr->frame_control))
+ return _FAIL;
- memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+ memcpy(pattrib->ra, hdr->addr1, ETH_ALEN);
+ memcpy(pattrib->ta, hdr->addr2, ETH_ALEN);
- switch (pattrib->to_fr_ds) {
- case 0:
- memcpy(pattrib->ra, pda, ETH_ALEN);
- memcpy(pattrib->ta, psa, ETH_ALEN);
- ret = sta2sta_data_frame(adapter, precv_frame, &psta);
- break;
- case 1:
- memcpy(pattrib->ra, pda, ETH_ALEN);
- memcpy(pattrib->ta, pbssid, ETH_ALEN);
+ if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(pattrib->bssid, hdr->addr2, ETH_ALEN);
ret = ap2sta_data_frame(adapter, precv_frame, &psta);
- break;
- case 2:
- memcpy(pattrib->ra, pbssid, ETH_ALEN);
- memcpy(pattrib->ta, psa, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(pattrib->bssid, hdr->addr1, ETH_ALEN);
ret = sta2ap_data_frame(adapter, precv_frame, &psta);
- break;
- case 3:
- memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
- ret = _FAIL;
- break;
- default:
- ret = _FAIL;
- break;
+ } else {
+ memcpy(pattrib->bssid, hdr->addr3, ETH_ALEN);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
}
- if (ret == _FAIL) {
- goto exit;
- } else if (ret == RTW_RX_HANDLED) {
- goto exit;
- }
+ if (ret == _FAIL || ret == RTW_RX_HANDLED)
+ return ret;
- if (!psta) {
- ret = _FAIL;
- goto exit;
- }
+ if (!psta)
+ return _FAIL;
/* psta->rssi = prxcmd->rssi; */
/* psta->signal_quality = prxcmd->sq; */
@@ -1014,16 +975,16 @@ static int validate_recv_data_frame(struct adapter *adapter,
pattrib->ack_policy = 0;
/* parsing QC field */
if (pattrib->qos) {
- pattrib->priority = GetPriority((ptr + 24));
+ pattrib->priority = ieee80211_get_tid(hdr);
pattrib->ack_policy = GetAckpolicy((ptr + 24));
pattrib->amsdu = GetAMsdu((ptr + 24));
- pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+ pattrib->hdrlen = 26;
if (pattrib->priority != 0 && pattrib->priority != 3)
adapter->recvpriv.bIsAnyNonBEPkts = true;
} else {
pattrib->priority = 0;
- pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+ pattrib->hdrlen = 24;
}
if (pattrib->order)/* HT-CTRL 11n */
@@ -1032,10 +993,9 @@ static int validate_recv_data_frame(struct adapter *adapter,
precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
/* decache, drop duplicate recv packets */
- if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
- ret = _FAIL;
- goto exit;
- }
+ if (recv_decache(precv_frame, ieee80211_has_retry(hdr->frame_control),
+ &psta->sta_recvpriv.rxcache) == _FAIL)
+ return _FAIL;
if (pattrib->privacy) {
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
@@ -1047,9 +1007,7 @@ static int validate_recv_data_frame(struct adapter *adapter,
pattrib->icv_len = 0;
}
-exit:
-
- return ret;
+ return _SUCCESS;
}
static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv_frame)
@@ -1059,11 +1017,8 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
/* then call check if rx seq/frag. duplicated. */
int retval = _FAIL;
- u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- u8 *ptr = precv_frame->rx_data;
- __le16 fc = *(__le16 *)ptr;
- u8 ver = (unsigned char)(*ptr) & 0x3;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
@@ -1072,32 +1027,26 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
pmlmeext->channel_set[ch_set_idx].rx_count++;
}
- /* add version chk */
- if (ver != 0)
+ if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_VERS)) != 0)
return _FAIL;
- pattrib->to_fr_ds = get_tofr_ds(ptr);
-
- pattrib->frag_num = GetFragNum(ptr);
- pattrib->seq_num = GetSequence(ptr);
+ pattrib->frag_num = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ pattrib->seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- pattrib->pw_save = GetPwrMgt(ptr);
- pattrib->mfrag = ieee80211_has_morefrags(fc);
- pattrib->mdata = ieee80211_has_moredata(fc);
- pattrib->privacy = ieee80211_has_protected(fc);
- pattrib->order = ieee80211_has_order(fc);
-
- /* Dump rx packets */
- GetHalDefVar8188EUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
+ pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
+ pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
+ pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
+ pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
+ pattrib->order = ieee80211_has_order(hdr->frame_control);
/* We return _SUCCESS only for data frames. */
- if (ieee80211_is_mgmt(fc))
+ if (ieee80211_is_mgmt(hdr->frame_control))
validate_recv_mgnt_frame(adapter, precv_frame);
- else if (ieee80211_is_ctl(fc))
+ else if (ieee80211_is_ctl(hdr->frame_control))
validate_recv_ctrl_frame(adapter, precv_frame);
- else if (ieee80211_is_data(fc)) {
+ else if (ieee80211_is_data(hdr->frame_control)) {
rtw_led_control(adapter, LED_CTL_RX);
- pattrib->qos = ieee80211_is_data_qos(fc);
+ pattrib->qos = ieee80211_is_data_qos(hdr->frame_control);
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
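/*
 * Editor's sketch (illustrative, not part of the patch): seq_ctrl packs the
 * 4-bit fragment number in its low bits and the 12-bit sequence number above
 * it.  IEEE80211_SCTL_FRAG and IEEE80211_SEQ_TO_SN() come from
 * <linux/ieee80211.h>; the demo_* name is hypothetical:
 */
static void demo_split_seq_ctrl(__le16 seq_ctrl, u16 *seq_num, u16 *frag_num)
{
	u16 sc = le16_to_cpu(seq_ctrl);

	*frag_num = sc & IEEE80211_SCTL_FRAG;	/* bits 0-3 */
	*seq_num = IEEE80211_SEQ_TO_SN(sc);	/* bits 4-15 */
}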
@@ -1284,8 +1233,9 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_fr
psta_addr = pfhdr->attrib.ta;
psta = rtw_get_stainfo(pstapriv, psta_addr);
if (!psta) {
- u8 type = GetFrameType(pfhdr->rx_data);
- if (type != WIFI_DATA_TYPE) {
+ __le16 fc = *(__le16 *)pfhdr->rx_data;
+
+ if (ieee80211_is_data(fc)) {
psta = rtw_get_bcmc_stainfo(padapter);
pdefrag_q = &psta->sta_recvpriv.defrag_q;
} else {
@@ -1723,12 +1673,9 @@ static int recv_func_prehandle(struct adapter *padapter, struct recv_frame *rfra
/* check the frame crtl field and decache */
ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS) {
+ if (ret != _SUCCESS)
rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
- goto exit;
- }
-exit:
return ret;
}
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
index 2cdcdfd5ca5c..5bba57d18b5f 100644
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ b/drivers/staging/r8188eu/core/rtw_security.c
@@ -63,7 +63,7 @@ void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
}
@@ -504,7 +504,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
} else {
@@ -1133,7 +1133,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
} else {
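/*
 * Editor's sketch (illustrative, not part of the patch): PTR_ALIGN() rounds a
 * pointer up to a power-of-two boundary without the integer cast the old
 * RND4((size_t)p) macro needed:
 */
static u8 *demo_align4(u8 *p)
{
	return PTR_ALIGN(p, 4);	/* lowest 4-byte-aligned address >= p */
}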
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index 91ff82f24f1f..357f98e22d8a 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -470,9 +470,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
- res = (match) ? false : true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match) ? true : false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 665b077190bc..392a65783f32 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -276,14 +276,6 @@ void Restore_DM_Func_Flag(struct adapter *padapter)
SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
}
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
-{
- if (enable)
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
- else
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
-}
-
void Set_MSR(struct adapter *padapter, u8 type)
{
u8 val8;
@@ -511,6 +503,31 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
return true;
}
+static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask)
+{
+ u8 acmctrl = rtw_read8(adapter, REG_ACMHWCTRL);
+
+ if (acm_mask > 1)
+ acmctrl = acmctrl | 0x1;
+
+ if (acm_mask & BIT(3))
+ acmctrl |= ACMHW_VOQEN;
+ else
+ acmctrl &= (~ACMHW_VOQEN);
+
+ if (acm_mask & BIT(2))
+ acmctrl |= ACMHW_VIQEN;
+ else
+ acmctrl &= (~ACMHW_VIQEN);
+
+ if (acm_mask & BIT(1))
+ acmctrl |= ACMHW_BEQEN;
+ else
+ acmctrl &= (~ACMHW_BEQEN);
+
+ rtw_write8(adapter, REG_ACMHWCTRL, acmctrl);
+}
+
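/*
 * Editor's sketch (illustrative, not part of the patch): the acm_mask handed
 * to set_acm_ctrl() is accumulated per access category while parsing the WMM
 * parameter set (BIT(1) = BE, BIT(2) = VI, BIT(3) = VO, as used above).  A
 * hypothetical builder:
 */
static u8 demo_build_acm_mask(bool be_acm, bool vi_acm, bool vo_acm)
{
	u8 mask = 0;

	if (be_acm)
		mask |= BIT(1);
	if (vi_acm)
		mask |= BIT(2);
	if (vo_acm)
		mask |= BIT(3);

	return mask;
}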
void WMMOnAssocRsp(struct adapter *padapter)
{
u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
@@ -522,6 +539,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct hal_data_8188e *haldata = &padapter->haldata;
if (pmlmeinfo->WMM_enable == 0) {
padapter->mlmepriv.acm_mask = 0;
@@ -550,7 +568,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
switch (ACI) {
case 0x0:
- SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ haldata->AcParam_BE = acParm;
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, acParm);
acm_mask |= (ACM ? BIT(1) : 0);
edca[XMIT_BE_QUEUE] = acParm;
break;
@@ -572,7 +591,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
}
if (padapter->registrypriv.acm_method == 1)
- SetHwReg8188EU(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ set_acm_ctrl(padapter, acm_mask);
else
padapter->mlmepriv.acm_mask = acm_mask;
@@ -743,6 +762,35 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
memcpy(&pmlmeinfo->HT_info, pIE->data, pIE->Length);
}
+static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
+{
+ u8 sec_spacing;
+
+ if (spacing <= 7) {
+ switch (adapter->securitypriv.dot11PrivacyAlgrthm) {
+ case _NO_PRIVACY_:
+ case _AES_:
+ sec_spacing = 0;
+ break;
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ sec_spacing = 6;
+ break;
+ default:
+ sec_spacing = 7;
+ break;
+ }
+
+ if (spacing < sec_spacing)
+ spacing = sec_spacing;
+
+ rtw_write8(adapter, REG_AMPDU_MIN_SPACE,
+ (rtw_read8(adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | spacing);
+ }
+}
+
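/*
 * Editor's sketch (illustrative, not part of the patch): HTOnAssocRsp() below
 * extracts the peer's minimum MPDU start spacing (bits 2..4 of the HT A-MPDU
 * parameters octet) and lets set_min_ampdu_spacing() clamp it against the
 * per-cipher minimum.  The helper name is hypothetical:
 */
static void demo_apply_ampdu_density(struct adapter *padapter, u8 ampdu_params)
{
	u8 spacing = (ampdu_params & 0x1c) >> 2;

	set_min_ampdu_spacing(padapter, spacing);
}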
void HTOnAssocRsp(struct adapter *padapter)
{
unsigned char max_AMPDU_len;
@@ -767,7 +815,7 @@ void HTOnAssocRsp(struct adapter *padapter)
min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
- SetHwReg8188EU(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ set_min_ampdu_spacing(padapter, min_MPDU_spacing);
SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
}
@@ -846,7 +894,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
if (!is_client_associated_to_ap(Adapter))
return true;
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
@@ -867,7 +915,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* check bw and channel offset */
/* parsing HT_CAP_IE */
@@ -916,7 +964,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
else
hidden_ssid = false;
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
+ if (p && (!hidden_ssid && (*(p + 1)))) {
memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
bssid->Ssid.SsidLength = *(p + 1);
} else {
@@ -1275,14 +1323,10 @@ void update_IOT_info(struct adapter *padapter)
case HT_IOT_PEER_RALINK:
pmlmeinfo->turboMode_cts2self = 0;
pmlmeinfo->turboMode_rtsen = 1;
- /* disable high power */
- Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
break;
case HT_IOT_PEER_REALTEK:
/* rtw_write16(padapter, 0x4cc, 0xffff); */
/* rtw_write16(padapter, 0x546, 0x01c0); */
- /* disable high power */
- Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
break;
default:
pmlmeinfo->turboMode_cts2self = 0;
@@ -1291,26 +1335,36 @@ void update_IOT_info(struct adapter *padapter)
}
}
+static void set_ack_preamble(struct adapter *adapter, bool short_preamble)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ u8 val8;
+
+ /* Joseph marked this out for the Netgear 3500 TKIP channel 7 issue. (Temporarily) */
+ val8 = haldata->nCur40MhzPrimeSC << 5;
+ if (short_preamble)
+ val8 |= 0x80;
+
+ rtw_write8(adapter, REG_RRSR + 2, val8);
+}
+
void update_capinfo(struct adapter *Adapter, u16 updateCap)
{
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- bool ShortPreamble;
/* Check preamble mode, 2005.01.06, by rcnjko. */
/* Mark to update preamble value forever, 2008.03.18 by lanhsin */
if (updateCap & cShortPreamble) { /* Short Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
- ShortPreamble = true;
pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
- SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ set_ack_preamble(Adapter, true);
}
} else { /* Long Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
- ShortPreamble = false;
pmlmeinfo->preamble_mode = PREAMBLE_LONG;
- SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ set_ack_preamble(Adapter, false);
}
}
@@ -1338,7 +1392,6 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
void update_wireless_mode(struct adapter *padapter)
{
int ratelen, network_type = 0;
- u32 SIFS_Timer;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
@@ -1365,10 +1418,12 @@ void update_wireless_mode(struct adapter *padapter)
pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
- SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
- /* change this value if having IOT issues. */
-
- SetHwReg8188EU(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+ /* RESP_SIFS for CCK */
+ rtw_write8(padapter, REG_R2T_SIFS, 0x08);
+ rtw_write8(padapter, REG_R2T_SIFS + 1, 0x08);
+ /* RESP_SIFS for OFDM */
+ rtw_write8(padapter, REG_T2T_SIFS, 0x0a);
+ rtw_write8(padapter, REG_T2T_SIFS + 1, 0x0a);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1411,34 +1466,12 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
return _SUCCESS;
}
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
-{
- struct sta_info *psta;
- u16 tid;
- u16 param;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- psta = rtw_get_stainfo(pstapriv, addr);
-
- if (psta) {
- param = le16_to_cpu(preq->BA_para_set);
- tid = (param >> 2) & 0x0f;
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
- }
-}
-
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
{
u8 *pIE;
__le32 *pbuf;
- pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
pbuf = (__le32 *)pIE;
pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1));
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index c2a550e7250e..3d8e9dea7651 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -52,8 +52,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
- Please insert all the queue initializaiton using rtw_init_queue below
- */
+ * Please insert all the queue initialization using rtw_init_queue below
+ */
pxmitpriv->adapter = padapter;
@@ -66,10 +66,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
- Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
- and initialize free_xmit_frame below.
- Please also apply free_txobj to link_up all the xmit_frames...
- */
+ * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * and initialize free_xmit_frame below.
+ * Please also apply free_txobj to link_up all the xmit_frames...
+ */
pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
@@ -178,7 +178,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res) {
+ res = _FAIL;
+ goto exit;
+ }
+
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -399,7 +404,7 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
pattrib->priority = user_prio;
pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
- pattrib->subtype = WIFI_QOS_DATA_TYPE;
+ pattrib->subtype = IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA;
}
static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
@@ -448,14 +453,12 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
_rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
- if (ETH_P_IP == pattrib->ether_type) {/* IP header */
- if (((tmp[21] == 68) && (tmp[23] == 67)) ||
- ((tmp[21] == 67) && (tmp[23] == 68))) {
- /* 68 : UDP BOOTP client */
- /* 67 : UDP BOOTP server */
- /* Use low rate to send DHCP packet. */
- pattrib->dhcp_pkt = 1;
- }
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ /* Use low rate to send DHCP packet. */
+ pattrib->dhcp_pkt = 1;
}
}
}
@@ -497,7 +500,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
pattrib->hdrlen = WLAN_HDR_A3_LEN;
- pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->subtype = IEEE80211_FTYPE_DATA;
pattrib->priority = 0;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
@@ -642,7 +645,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = (u8 *)RND4((size_t)(payload));
+ payload = PTR_ALIGN(payload, 4);
payload = payload + pattrib->hdrlen + pattrib->iv_len;
if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -696,13 +699,13 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
{
u16 *qc;
- struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
u8 qos_option = false;
int res = _SUCCESS;
- __le16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_control;
struct sta_info *psta;
@@ -717,7 +720,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
- if (pattrib->subtype & WIFI_DATA_TYPE) {
+ if (pattrib->subtype & IEEE80211_FTYPE_DATA) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* to_ds = 1, fr_ds = 0; */
/* Data transfer to AP */
@@ -853,22 +856,19 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
}
/*
-
-This sub-routine will perform all the following:
-
-1. remove 802.3 header.
-2. create wlan_header, based on the info in pxmitframe
-3. append sta's iv/ext-iv
-4. append LLC
-5. move frag chunk from pframe to pxmitframe->mem
-6. apply sw-encrypt, if necessary.
-
-*/
+ * This sub-routine will perform all the following:
+ *
+ * 1. remove 802.3 header.
+ * 2. create wlan_header, based on the info in pxmitframe
+ * 3. append sta's iv/ext-iv
+ * 4. append LLC
+ * 5. move frag chunk from pframe to pxmitframe->mem
+ * 6. apply sw-encrypt, if necessary.
+ */
s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
{
struct pkt_file pktfile;
s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
- size_t addr;
u8 *pframe, *mem_start;
u8 hw_hdr_offset;
struct sta_info *psta;
@@ -985,9 +985,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
break;
}
- addr = (size_t)(pframe);
-
- mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
}
@@ -1210,24 +1208,22 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
}
/*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then there is no need for a critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ */
struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
{
/*
- Please remember to use all the osdep_service api,
- and lock/unlock or _enter/_exit critical to protect
- pfree_xmit_queue
- */
+ * Please remember to use all the osdep_service api,
+ * and lock/unlock or _enter/_exit critical to protect
+ * pfree_xmit_queue
+ */
struct xmit_frame *pxframe = NULL;
struct list_head *plist, *phead;
@@ -1474,7 +1470,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+int rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1482,6 +1478,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
pxmitpriv->hwxmits = kzalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry, GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return -ENOMEM;
hwxmits = pxmitpriv->hwxmits;
@@ -1498,6 +1496,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
} else {
}
+
+ return 0;
}
void rtw_free_hwxmits(struct adapter *padapter)
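/*
 * Editor's sketch (illustrative alternative, not what the patch does): the
 * hwxmits array could also be allocated with kcalloc(), which checks the
 * count * size multiplication for overflow:
 */
static int demo_alloc_hwxmits(struct xmit_priv *pxmitpriv)
{
	pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
	pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
				     sizeof(struct hw_xmit), GFP_KERNEL);
	if (!pxmitpriv->hwxmits)
		return -ENOMEM;

	return 0;
}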
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index e7f834b02567..7901d0afa2e7 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -170,7 +170,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u32 arraylen = sizeof(array_agc_tab_1t_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_agc_tab_1t_8188e);
u32 *array = array_agc_tab_1t_8188e;
bool biol = false;
struct adapter *adapter = dm_odm->Adapter;
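/*
 * Editor's sketch (illustrative, not part of the patch): ARRAY_SIZE() from
 * <linux/kernel.h> yields the element count and fails to build when handed a
 * pointer instead of an array, unlike the open-coded sizeof division it
 * replaces here:
 */
static const u32 demo_tab[] = { 0x800, 0x0000000c, 0x804, 0x00000001 };

static u32 demo_tab_len(void)
{
	return ARRAY_SIZE(demo_tab);	/* 4 elements */
}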
@@ -446,7 +446,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u32 arraylen = sizeof(array_phy_reg_1t_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_phy_reg_1t_8188e);
u32 *array = array_phy_reg_1t_8188e;
bool biol = false;
struct adapter *adapter = dm_odm->Adapter;
@@ -651,7 +651,7 @@ void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex;
u32 i = 0;
- u32 arraylen = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_phy_reg_pg_8188e);
u32 *array = array_phy_reg_pg_8188e;
hex = ODM_ITRF_USB << 8;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index 20ce1571fc26..77b25885c63b 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -132,7 +132,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
u32 hex = 0;
u32 i;
- u32 array_len = sizeof(array_MAC_REG_8188E) / sizeof(u32);
+ u32 array_len = ARRAY_SIZE(array_MAC_REG_8188E);
u32 *array = array_MAC_REG_8188E;
bool biol = false;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 9dc888a66d09..08cbfce3808d 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -138,7 +138,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
u32 hex = 0;
u32 i = 0;
- u32 ArrayLen = sizeof(Array_RadioA_1T_8188E) / sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8188E);
u32 *Array = Array_RadioA_1T_8188E;
bool biol = false;
struct adapter *Adapter = pDM_Odm->Adapter;
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
index 5b91aec6a7e3..150ea380c39e 100644
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
@@ -1,30 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-/*++
-
-Module Name:
- HalPwrSeqCmd.c
-
-Abstract:
- Implement HW Power sequence configuration CMD handling routine for Realtek devices.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
- 2011-07-07 Roger Create.
-
---*/
-
#include "../include/HalPwrSeqCmd.h"
-/* Description: */
-/* This routine deals with the Power Configuration CMDs parsing
- * for RTL8723/RTL8188E Series IC.
- * Assumption:
- * We should follow specific format which was released from HW SD.
- */
u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
{
struct wl_pwr_cfg pwrcfgcmd = {0};
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index 06f2a9083056..910cc07f656c 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -44,7 +44,7 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
cnt += sprintf((buf + cnt), "1T1R_");
- cnt += sprintf((buf + cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+ cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
pr_info("%s", buf);
}
@@ -267,7 +267,7 @@ static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
{
struct registry_priv *pregistrypriv = &adapter->registrypriv;
- bool wifi_cfg = (pregistrypriv->wifi_spec) ? true : false;
+ bool wifi_cfg = pregistrypriv->wifi_spec;
bool result = true;
switch (numoutpipe) {
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 87e9a5270be0..54cc3d7789cd 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -65,13 +65,13 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
- isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+ isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
if (isCCKrate) {
u8 cck_agc_rpt;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
cck_highpwr = dm_odm->bCckHighPower;
@@ -170,7 +170,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* Get Rx snr value in DB */
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
}
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
@@ -234,7 +234,7 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
if ((!pPktinfo->bPacketMatchBSSID))
return;
- isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+ isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
/* Smart Antenna Debug Message------------------ */
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index f1464e4ba429..475650dc7301 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -199,16 +199,16 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 rate_len, pktlen;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
eth_broadcast_addr(pwlanhdr->addr1);
@@ -218,8 +218,8 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
/* timestamp will be inserted by hardware */
pframe += 8;
@@ -281,15 +281,15 @@ _ConstructBeacon:
static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
__le16 *fctrl;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
/* Frame control. */
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetPwrMgt(fctrl);
SetFrameSubType(pframe, WIFI_PSPOLL);
@@ -314,7 +314,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
u8 bEosp,
u8 bForcePowerSave)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
@@ -322,9 +322,9 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (bForcePowerSave)
SetPwrMgt(fctrl);
@@ -353,19 +353,19 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
SetSeqNum(pwlanhdr, 0);
if (bQoS) {
- struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+ struct ieee80211_qos_hdr *pwlanqoshdr;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
- SetPriority(&pwlanqoshdr->qc, AC);
- SetEOSP(&pwlanqoshdr->qc, bEosp);
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pktlen = sizeof(struct ieee80211_qos_hdr);
} else {
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
}
*pLength = pktlen;
@@ -373,7 +373,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u8 *mac, *bssid;
u32 pktlen;
@@ -381,12 +381,12 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&adapt->eeprompriv);
bssid = cur_network->MacAddress;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -395,7 +395,7 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
SetSeqNum(pwlanhdr, 0);
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe += pktlen;
if (cur_network->IELength > MAX_IE_SZ)
@@ -557,8 +557,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
haldata->RegFwHwTxQCtrl &= (~BIT(6));
- /* Clear beacon valid check bit. */
- SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+ clear_beacon_valid_bit(adapt);
DLBcnCount = 0;
poll = 0;
do {
@@ -569,7 +568,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
yield();
/* mdelay(10); */
/* check rsvd page download OK. */
- GetHwReg8188EU(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ bcn_valid = get_beacon_valid_bit(adapt);
poll++;
} while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
} while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
@@ -597,7 +596,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* Update RSVD page location H2C to Fw. */
if (bcn_valid)
- SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+ clear_beacon_valid_bit(adapt);
/* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
/* Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
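The clear_beacon_valid_bit()/get_beacon_valid_bit() helpers used above are not defined in this patch; judging from the HW_VAR_BCN_VALID handlers removed from usb_halinit.c further below, they presumably wrap the BIT(0) access at REG_TDECTRL + 2 roughly like this (a sketch, not the actual definitions):

	/* BCN_VALID is BIT(16) of REG_TDECTRL, i.e. BIT(0) of REG_TDECTRL + 2; write 1 to clear. */
	void clear_beacon_valid_bit(struct adapter *adapter)
	{
		rtw_write8(adapter, REG_TDECTRL + 2,
			   rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
	}

	bool get_beacon_valid_bit(struct adapter *adapter)
	{
		return rtw_read8(adapter, REG_TDECTRL + 2) & BIT(0);
	}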
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 6811be95da9a..e17375a74f17 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -33,17 +33,16 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
{
s32 status = _FAIL;
u8 reg_0x88 = 0;
- u32 start = 0, passing_time = 0;
+ unsigned long timeout;
control = control & 0x0f;
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control);
- start = jiffies;
+ timeout = jiffies + msecs_to_jiffies(1000);
while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
- (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ time_before(jiffies, timeout))
;
- }
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
@@ -187,8 +186,8 @@ static void efuse_read_phymap_from_txpktbuf(
u16 *size /* for efuse content: the max byte to read. will update to byte read */
)
{
+ unsigned long timeout;
u16 dbg_addr = 0;
- u32 start = 0, passing_time = 0;
__le32 lo32 = 0, hi32 = 0;
u16 len = 0, count = 0;
int i = 0;
@@ -207,9 +206,8 @@ static void efuse_read_phymap_from_txpktbuf(
rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i);
rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
- start = jiffies;
- while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) &&
- (passing_time = rtw_get_passing_time_ms(start)) < 1000)
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) && time_before(jiffies, timeout))
rtw_usleep_os(100);
/* data from EEPROM needs to be in LE */
@@ -505,7 +503,6 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
- ChipVersion.ROMVer = 0; /* ROM code version. */
dump_chip_info(ChipVersion);
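The two polling loops above drop the driver-private rtw_get_passing_time_ms() in favour of the kernel's jiffies helpers. The generic shape of such a bounded poll, for reference (the status register and ready bit are placeholders):

	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	do {
		if (rtw_read8(padapter, REG_SOME_STATUS) & SOME_READY_BIT)	/* hypothetical register/bit */
			break;
		rtw_usleep_os(100);
	} while (time_before(jiffies, timeout));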
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index ea75ff11ad17..4864dafd887b 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -378,10 +378,10 @@ phy_InitBBRFRegisterDefinition(
/* Tx AGC Gain Stage (same for all path. Should we remove this?) */
pHalData->PHYRegDef.rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- /* Tranceiver A~D HSSI Parameter-1 */
+ /* Transceiver A~D HSSI Parameter-1 */
pHalData->PHYRegDef.rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
- /* Tranceiver A~D HSSI Parameter-2 */
+ /* Transceiver A~D HSSI Parameter-2 */
pHalData->PHYRegDef.rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
/* RF switch Control */
@@ -405,10 +405,10 @@ phy_InitBBRFRegisterDefinition(
/* Tx AFE control 2 */
pHalData->PHYRegDef.rfTxAFE = rOFDM0_XATxAFE;
- /* Tranceiver LSSI Readback SI mode */
+ /* Transceiver LSSI Readback SI mode */
pHalData->PHYRegDef.rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- /* Tranceiver LSSI Readback PI mode */
+ /* Transceiver LSSI Readback PI mode */
pHalData->PHYRegDef.rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
index 9bf7a9248026..dff0cba751df 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
@@ -113,12 +113,13 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
struct hal_data_8188e *pHalData = &padapter->haldata;
struct phy_info *pPHYInfo = &pattrib->phy_info;
u8 *wlanhdr = precvframe->rx_data;
+ __le16 fc = *(__le16 *)wlanhdr;
struct odm_per_pkt_info pkt_info;
u8 *sa = NULL;
struct sta_priv *pstapriv;
struct sta_info *psta;
- pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+ pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(fc)) &&
!pattrib->icv_err && !pattrib->crc_err &&
!memcmp(get_hdr_bssid(wlanhdr),
get_bssid(&padapter->mlmepriv), ETH_ALEN));
@@ -127,9 +128,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
(!memcmp(get_da(wlanhdr),
myid(&padapter->eeprompriv), ETH_ALEN));
- pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
- (GetFrameSubType(wlanhdr) == WIFI_BEACON);
-
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && ieee80211_is_beacon(fc);
if (pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
sa = padapter->mlmepriv.cur_network.network.MacAddress;
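ieee80211_is_ctl() and ieee80211_is_beacon() from <linux/ieee80211.h> test the type/subtype bits of the little-endian frame_control word, which is why fc is loaded as __le16 straight from the start of the header above. A minimal usage sketch (the wrapper function is hypothetical):

	#include <linux/ieee80211.h>

	static bool frame_is_interesting(const u8 *wlanhdr)
	{
		__le16 fc = *(const __le16 *)wlanhdr;	/* frame_control is the first field of the header */

		if (ieee80211_is_ctl(fc))		/* control frames: RTS/CTS/ACK/PS-Poll/... */
			return false;
		return ieee80211_is_beacon(fc) || ieee80211_is_data(fc);
	}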
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index 55032d7ae7e3..bdfa51949289 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -347,7 +347,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
mem_addr += w_sz;
- mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
+ mem_addr = PTR_ALIGN(mem_addr, 4);
}
rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -437,7 +437,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
pfirstframe = pxmitframe;
len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
pbuf_tail = len;
- pbuf = _RND8(pbuf_tail);
+ pbuf = round_up(pbuf_tail, 8);
/* check pkt amount in one bulk */
desc_cnt = 0;
@@ -488,7 +488,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
- if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
+ if (pbuf + len > MAX_XMITBUF_SZ) {
pxmitframe->agg_num = 1;
pxmitframe->pkt_offset = 1;
break;
@@ -511,7 +511,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
/* handle pointer and stop condition */
pbuf_tail = pbuf + len;
- pbuf = _RND8(pbuf_tail);
+ pbuf = round_up(pbuf_tail, 8);
pfirstframe->agg_num++;
if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
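The kernel's round_up() and PTR_ALIGN() macros replace the driver's private rounding helpers; both assume a power-of-two alignment, which holds for the 4-, 8- and 128-byte boundaries used here. The equivalences relied on in this and the following USB hunks, roughly:

	mem_addr   = PTR_ALIGN(mem_addr, 4);           /* was (u8 *)RND4((size_t)mem_addr) */
	pbuf       = round_up(pbuf_tail, 8);           /* was _RND8(pbuf_tail)             */
	pkt_offset = (u16)round_up(pkt_offset, 128);   /* was (u16)_RND128(pkt_offset)     */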
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index a92774352d2d..a217272a07f8 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -123,7 +123,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
if (haldata->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be done before writing REG_RQPN. */
if (haldata->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
@@ -539,10 +539,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Save target channel */
haldata->CurrentChannel = 6;/* default set to 6 */
- if (pwrctrlpriv->reg_rfoff) {
- pwrctrlpriv->rf_pwrstate = rf_off;
- }
-
/* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
/* HW GPIO pin. Before PHY_RFConfig8192C. */
/* 2010/08/26 MH If Efuse does not support sective suspend then disable the function. */
@@ -942,17 +938,6 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 *val)
}
}
-static void hw_var_set_bssid(struct adapter *Adapter, u8 *val)
-{
- u8 idx = 0;
- u32 reg_bssid;
-
- reg_bssid = REG_BSSID;
-
- for (idx = 0; idx < 6; idx++)
- rtw_write8(Adapter, (reg_bssid + idx), val[idx]);
-}
-
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
@@ -963,9 +948,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_SET_OPMODE:
hw_var_set_opmode(Adapter, val);
break;
- case HW_VAR_BSSID:
- hw_var_set_bssid(Adapter, val);
- break;
case HW_VAR_BASIC_RATE:
{
u16 BrateCfg = 0;
@@ -1024,17 +1006,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
ResumeTxBeacon(Adapter);
}
break;
- case HW_VAR_MLME_DISCONNECT:
- /* Set RCR to not to receive data frame when NO LINK state */
- /* reject all data frames */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
-
- /* reset TSF */
- rtw_write8(Adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
-
- /* disable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4));
- break;
case HW_VAR_MLME_SITESURVEY:
if (*((u8 *)val)) { /* under sitesurvey */
/* config RCR to receive different BSSID & not to receive data frame */
@@ -1065,36 +1036,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN);
}
break;
- case HW_VAR_MLME_JOIN:
- {
- u8 RetryLimit = 0x30;
- u8 type = *((u8 *)val);
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
-
- if (type == 0) { /* prepare to join */
- /* enable to rx data frame.Accept all data frame */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
-
- rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- RetryLimit = 48;
- else /* Ad-hoc Mode */
- RetryLimit = 0x7;
- } else if (type == 1) {
- /* joinbss_event call back when join res < 0 */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
- } else if (type == 2) {
- /* sta add event call back */
- /* enable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
- RetryLimit = 0x7;
- }
- rtw_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
- }
- break;
case HW_VAR_SLOT_TIME:
{
u8 u1bAIFS, aSifsTime;
@@ -1119,26 +1060,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_RESP_SIFS:
- /* RESP_SIFS for CCK */
- rtw_write8(Adapter, REG_R2T_SIFS, val[0]); /* SIFS_T2T_CCK (0x08) */
- rtw_write8(Adapter, REG_R2T_SIFS + 1, val[1]); /* SIFS_R2T_CCK(0x08) */
- /* RESP_SIFS for OFDM */
- rtw_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
- rtw_write8(Adapter, REG_T2T_SIFS + 1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
- break;
- case HW_VAR_ACK_PREAMBLE:
- {
- u8 regTmp;
- u8 bShortPreamble = *((bool *)val);
- /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
- regTmp = (haldata->nCur40MhzPrimeSC) << 5;
- if (bShortPreamble)
- regTmp |= 0x80;
-
- rtw_write8(Adapter, REG_RRSR + 2, regTmp);
- }
- break;
case HW_VAR_DM_FLAG:
podmpriv->SupportAbility = *((u8 *)val);
break;
@@ -1148,73 +1069,11 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
else
podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
break;
- case HW_VAR_DM_FUNC_SET:
- if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
- podmpriv->SupportAbility = pdmpriv->InitODMFlag;
- } else {
- podmpriv->SupportAbility |= *((u32 *)val);
- }
+ case HW_VAR_DM_FUNC_RESET:
+ podmpriv->SupportAbility = pdmpriv->InitODMFlag;
break;
case HW_VAR_DM_FUNC_CLR:
- podmpriv->SupportAbility &= *((u32 *)val);
- break;
- case HW_VAR_AC_PARAM_BE:
- haldata->AcParam_BE = ((u32 *)(val))[0];
- rtw_write32(Adapter, REG_EDCA_BE_PARAM, ((u32 *)(val))[0]);
- break;
- case HW_VAR_ACM_CTRL:
- {
- u8 acm_ctrl = *((u8 *)val);
- u8 AcmCtrl = rtw_read8(Adapter, REG_ACMHWCTRL);
-
- if (acm_ctrl > 1)
- AcmCtrl = AcmCtrl | 0x1;
-
- if (acm_ctrl & BIT(3))
- AcmCtrl |= AcmHw_VoqEn;
- else
- AcmCtrl &= (~AcmHw_VoqEn);
-
- if (acm_ctrl & BIT(2))
- AcmCtrl |= AcmHw_ViqEn;
- else
- AcmCtrl &= (~AcmHw_ViqEn);
-
- if (acm_ctrl & BIT(1))
- AcmCtrl |= AcmHw_BeqEn;
- else
- AcmCtrl &= (~AcmHw_BeqEn);
-
- rtw_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
- }
- break;
- case HW_VAR_AMPDU_MIN_SPACE:
- {
- u8 MinSpacingToSet;
- u8 SecMinSpace;
-
- MinSpacingToSet = *((u8 *)val);
- if (MinSpacingToSet <= 7) {
- switch (Adapter->securitypriv.dot11PrivacyAlgrthm) {
- case _NO_PRIVACY_:
- case _AES_:
- SecMinSpace = 0;
- break;
- case _WEP40_:
- case _WEP104_:
- case _TKIP_:
- case _TKIP_WTMIC_:
- SecMinSpace = 6;
- break;
- default:
- SecMinSpace = 7;
- break;
- }
- if (MinSpacingToSet < SecMinSpace)
- MinSpacingToSet = SecMinSpace;
- rtw_write8(Adapter, REG_AMPDU_MIN_SPACE, (rtw_read8(Adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | MinSpacingToSet);
- }
- }
+ podmpriv->SupportAbility = 0;
break;
case HW_VAR_AMPDU_FACTOR:
{
@@ -1242,221 +1101,15 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_RXDMA_AGG_PG_TH:
- {
- u8 threshold = *((u8 *)val);
- if (threshold == 0)
- threshold = USB_RXAGG_PAGE_COUNT;
- rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
- }
- break;
- case HW_VAR_H2C_FW_PWRMODE:
- {
- u8 psmode = (*(u8 *)val);
-
- /* Forece leave RF low power mode for 1T1R to prevent conficting setting in Fw power */
- /* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
- if (psmode != PS_MODE_ACTIVE)
- ODM_RF_Saving(podmpriv, true);
- rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
- }
- break;
- case HW_VAR_H2C_FW_JOINBSSRPT:
- {
- u8 mstatus = (*(u8 *)val);
- rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
- }
- break;
- case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- {
- u8 p2p_ps_state = (*(u8 *)val);
- rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
- }
- break;
- case HW_VAR_INITIAL_GAIN:
- {
- struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
- u32 rx_gain = ((u32 *)(val))[0];
-
- if (rx_gain == 0xff) {/* restore rx gain */
- ODM_Write_DIG(podmpriv, pDigTable->BackupIGValue);
- } else {
- pDigTable->BackupIGValue = pDigTable->CurIGValue;
- ODM_Write_DIG(podmpriv, rx_gain);
- }
- }
- break;
- case HW_VAR_RPT_TIMER_SETTING:
- {
- u16 min_rpt_time = (*(u16 *)val);
- ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
- }
- break;
- case HW_VAR_ANTENNA_DIVERSITY_SELECT:
- {
- u8 Optimum_antenna = (*(u8 *)val);
- u8 Ant;
- /* switch antenna to Optimum_antenna */
- if (haldata->CurAntenna != Optimum_antenna) {
- Ant = (Optimum_antenna == 2) ? MAIN_ANT : AUX_ANT;
- ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, Ant);
-
- haldata->CurAntenna = Optimum_antenna;
- }
- }
- break;
- case HW_VAR_FIFO_CLEARN_UP:
- {
- struct pwrctrl_priv *pwrpriv = &Adapter->pwrctrlpriv;
- u8 trycnt = 100;
-
- /* pause tx */
- rtw_write8(Adapter, REG_TXPAUSE, 0xff);
-
- /* keep sn */
- Adapter->xmitpriv.nqos_ssn = rtw_read16(Adapter, REG_NQOS_SEQ);
-
- if (!pwrpriv->bkeepfwalive) {
- /* RX DMA stop */
- rtw_write32(Adapter, REG_RXPKT_NUM, (rtw_read32(Adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
- do {
- if (!(rtw_read32(Adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
- break;
- } while (trycnt--);
-
- /* RQPN Load 0 */
- rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
- rtw_write32(Adapter, REG_RQPN, 0x80000000);
- mdelay(10);
- }
- }
- break;
- case HW_VAR_TX_RPT_MAX_MACID:
- {
- u8 maxMacid = *val;
- rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, maxMacid + 1);
- }
- break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
break;
- case HW_VAR_BCN_VALID:
- /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
- rtw_write8(Adapter, REG_TDECTRL + 2, rtw_read8(Adapter, REG_TDECTRL + 2) | BIT(0));
- break;
- default:
- break;
- }
-
-}
-
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- switch (variable) {
- case HW_VAR_BCN_VALID:
- /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
- val[0] = (BIT(0) & rtw_read8(Adapter, REG_TDECTRL + 2)) ? true : false;
- break;
- case HW_VAR_DM_FLAG:
- val[0] = podmpriv->SupportAbility;
- break;
- case HW_VAR_FWLPS_RF_ON:
- {
- /* When we halt NIC, we should check if FW LPS is leave. */
- if (Adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
- /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
- /* because Fw is unload. */
- val[0] = true;
- } else {
- u32 valRCR;
- valRCR = rtw_read32(Adapter, REG_RCR);
- valRCR &= 0x00070000;
- if (valRCR)
- val[0] = false;
- else
- val[0] = true;
- }
- }
- break;
- case HW_VAR_CHK_HI_QUEUE_EMPTY:
- *val = ((rtw_read32(Adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0) ? true : false;
- break;
default:
break;
}
}
-/* Query setting of specified variable. */
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- switch (eVariable) {
- case HAL_DEF_IS_SUPPORT_ANT_DIV:
- *((u8 *)pValue) = (haldata->AntDivCfg == 0) ? false : true;
- break;
- case HAL_DEF_CURRENT_ANTENNA:
- *((u8 *)pValue) = haldata->CurAntenna;
- break;
- case HAL_DEF_DBG_DM_FUNC:
- *((u32 *)pValue) = haldata->odmpriv.SupportAbility;
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- *((u8 *)pValue) = haldata->bDumpRxPkt;
- break;
- case HAL_DEF_DBG_DUMP_TXPKT:
- *((u8 *)pValue) = haldata->bDumpTxPkt;
- break;
- default:
- break;
- }
-}
-
-/* Change default setting of specified variable. */
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- switch (eVariable) {
- case HAL_DEF_DBG_DM_FUNC:
- {
- u8 dm_func = *((u8 *)pValue);
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- if (dm_func == 0) { /* disable all dynamic func */
- podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
- } else if (dm_func == 1) {/* disable DIG */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
- } else if (dm_func == 2) {/* disable High power */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
- } else if (dm_func == 3) {/* disable tx power tracking */
- podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
- } else if (dm_func == 5) {/* disable antenna diversity */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
- } else if (dm_func == 6) {/* turn on all dynamic func */
- if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
- struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
- pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
- }
- podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
- }
- }
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- haldata->bDumpRxPkt = *((u8 *)pValue);
- break;
- case HAL_DEF_DBG_DUMP_TXPKT:
- haldata->bDumpTxPkt = *((u8 *)pValue);
- break;
- default:
- break;
- }
-}
-
void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
u8 init_rate = 0;
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index 673c30ed3cce..d5e674542a78 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -16,7 +16,7 @@ static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
int status;
u8 io_buf[4];
- if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ if (adapt->bSurpriseRemoved)
return -EPERM;
status = usb_control_msg_recv(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
@@ -59,7 +59,7 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
int status;
u8 io_buf[VENDOR_CMD_MAX_DATA_LEN];
- if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ if (adapt->bSurpriseRemoved)
return -EPERM;
memcpy(io_buf, data, size);
@@ -260,7 +260,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
if (pkt_copy) {
- pkt_copy->dev = adapt->pnetdev;
precvframe->pkt = pkt_copy;
precvframe->rx_head = pkt_copy->data;
precvframe->rx_end = pkt_copy->data + alloc_sz;
@@ -288,7 +287,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
recvframe_put(precvframe, skb_len);
- pkt_offset = (u16)_RND128(pkt_offset);
+ pkt_offset = (u16)round_up(pkt_offset, 128);
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
if (pattrib->physt)
@@ -415,8 +414,7 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
size_t alignment = 0;
u32 ret = _SUCCESS;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
- adapter->pwrctrlpriv.pnp_bstop_trx)
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return _FAIL;
if (!precvbuf)
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
index 2bc18eabb55d..7a530c7d57eb 100644
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ b/drivers/staging/r8188eu/include/HalVerDef.h
@@ -25,7 +25,6 @@ struct HAL_VERSION {
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
- u8 ROMVer;
};
/* Get element */
@@ -34,10 +33,10 @@ struct HAL_VERSION {
/* HAL_CHIP_TYPE_E */
#define IS_NORMAL_CHIP(version) \
- ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+ (GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP)
/* HAL_VENDOR_E */
#define IS_CHIP_VENDOR_TSMC(version) \
- ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+ (GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)
#endif
diff --git a/drivers/staging/r8188eu/include/basic_types.h b/drivers/staging/r8188eu/include/basic_types.h
index d82b2171d584..ffb21170e898 100644
--- a/drivers/staging/r8188eu/include/basic_types.h
+++ b/drivers/staging/r8188eu/include/basic_types.h
@@ -4,9 +4,6 @@
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
-#define SUCCESS 0
-#define FAIL (-1)
-
#include <linux/types.h>
#define NDIS_OID uint
@@ -14,9 +11,6 @@ typedef void (*proc_t)(void *);
#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
-#define MEM_ALIGNMENT_OFFSET (sizeof(size_t))
-#define MEM_ALIGNMENT_PADDING (sizeof(size_t) - 1)
-
/* port from fw */
/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
* to check correctness */
@@ -31,86 +25,21 @@ typedef void (*proc_t)(void *);
/* Convert little data endian to host ordering */
#define EF1BYTE(_val) \
((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val) \
- do { \
- (*((u8 *)(_ptr))) = EF1BYTE(_val) \
- } while (0)
-/* Write le data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val) \
- do { \
- (*((u16 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
-#define WRITEEF4BYTE(_ptr, _val) \
- do { \
- (*((u32 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
+/* Create a bit mask */
#define BIT_LEN_MASK_8(__bitlen) \
(0xFF >> (8 - (__bitlen)))
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
/*Description:
* Return 4-byte value in host byte ordering from
* 4-byte pointer in little-endian system.
*/
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
/*Description:
Translate subfield (continuous bits in little-endian) of 4-byte
value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
( \
(LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
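The surviving LE_BITS_TO_1BYTE() macro shifts the byte at __pstart right by __bitoffset and masks off __bitlen bits. A worked example with made-up values:

	u8 reg = 0x6c;					/* binary 0110 1100 */
	u8 field = LE_BITS_TO_1BYTE(&reg, 2, 3);	/* (0x6c >> 2) & 0x07 == 0x03 */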
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 09fc27082f7c..bba88a0ede61 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -26,7 +26,6 @@
#include "rtw_eeprom.h"
#include "sta_info.h"
#include "rtw_mlme.h"
-#include "rtw_debug.h"
#include "rtw_rf.h"
#include "rtw_event.h"
#include "rtw_led.h"
@@ -35,6 +34,7 @@
#include "rtw_ap.h"
#include "rtw_br_ext.h"
#include "rtl8188e_hal.h"
+#include "rtw_fw.h"
#define DRIVERVERSION "v4.1.4_6773.20130222"
@@ -116,11 +116,6 @@ struct registry_priv {
#define MAX_CONTINUAL_URB_ERR 4
-struct rt_firmware {
- u8 *data;
- u32 size;
-};
-
struct dvobj_priv {
struct adapter *if1;
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index 3cededa4dcfc..a56f3d6ca399 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -10,44 +10,16 @@
enum hw_variables {
HW_VAR_SET_OPMODE,
- HW_VAR_BSSID,
HW_VAR_BASIC_RATE,
HW_VAR_CORRECT_TSF,
- HW_VAR_MLME_DISCONNECT,
HW_VAR_MLME_SITESURVEY,
- HW_VAR_MLME_JOIN,
HW_VAR_SLOT_TIME,
- HW_VAR_RESP_SIFS,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_BCN_VALID,
HW_VAR_DM_FLAG,
HW_VAR_DM_FUNC_OP,
- HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_RESET,
HW_VAR_DM_FUNC_CLR,
- HW_VAR_AC_PARAM_BE,
- HW_VAR_ACM_CTRL,
- HW_VAR_AMPDU_MIN_SPACE,
HW_VAR_AMPDU_FACTOR,
- HW_VAR_RXDMA_AGG_PG_TH,
- HW_VAR_H2C_FW_PWRMODE,
- HW_VAR_H2C_FW_JOINBSSRPT,
- HW_VAR_FWLPS_RF_ON,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- HW_VAR_INITIAL_GAIN,
- HW_VAR_ANTENNA_DIVERSITY_SELECT,
- HW_VAR_FIFO_CLEARN_UP,
- HW_VAR_RPT_TIMER_SETTING,
- HW_VAR_TX_RPT_MAX_MACID,
HW_VAR_H2C_MEDIA_STATUS_RPT,
- HW_VAR_CHK_HI_QUEUE_EMPTY,
-};
-
-enum hal_def_variable {
- HAL_DEF_IS_SUPPORT_ANT_DIV,
- HAL_DEF_CURRENT_ANTENNA,
- HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
- HAL_DEF_DBG_DM_FUNC,/* for dbg */
- HAL_DEF_DBG_DUMP_TXPKT,
};
typedef s32 (*c2h_id_filter)(u8 id);
@@ -70,13 +42,9 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-
unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
uint rtw_hal_init(struct adapter *padapter);
uint rtw_hal_deinit(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index 8c20363cdd31..15636a808f52 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -123,24 +123,6 @@ enum NETWORK_TYPE {
WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
};
-#define SUPPORTED_24G_NETTYPE_MSK \
- (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
-
-#define IsSupported24G(NetType) \
- ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
-
-#define IsEnableHWCCK(NetType) \
- IsSupported24G(NetType)
-
-#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
-
-#define IsSupportedTxCCK(NetType) \
- ((NetType) & (WIRELESS_11B) ? true : false)
-#define IsSupportedTxOFDM(NetType) \
- ((NetType) & (WIRELESS_11G) ? true : false)
-#define IsSupportedTxMCS(NetType) \
- ((NetType) & (WIRELESS_11_24N) ? true : false)
-
struct ieee_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
@@ -196,35 +178,6 @@ struct ieee_param {
/* this is stolen from ipw2200 driver */
#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct rtw_ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u8 addr4[ETH_ALEN];
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr_qos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
-} __packed;
-
#define IEEE80211_3ADDR_LEN 24
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_FCS_LEN 4
@@ -636,24 +589,8 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
#define MAXTID 16
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-
/* Action category code */
enum rtw_ieee80211_category {
- RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
- RTW_WLAN_CATEGORY_QOS = 1,
- RTW_WLAN_CATEGORY_DLS = 2,
- RTW_WLAN_CATEGORY_BACK = 3,
- RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
- RTW_WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
- RTW_WLAN_CATEGORY_FT = 6,
- RTW_WLAN_CATEGORY_HT = 7,
- RTW_WLAN_CATEGORY_SA_QUERY = 8,
- RTW_WLAN_CATEGORY_TDLS = 12,
- RTW_WLAN_CATEGORY_WMM = 17,
RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
};
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
index 1902aa48a255..f131e17167bf 100644
--- a/drivers/staging/r8188eu/include/odm.h
+++ b/drivers/staging/r8188eu/include/odm.h
@@ -98,22 +98,6 @@ struct odm_per_pkt_info {
bool bPacketBeacon;
};
-enum odm_ability {
- /* BB Team */
- ODM_DIG = 0x00000001,
- ODM_HIGH_POWER = 0x00000002,
- ODM_CCK_CCA_TH = 0x00000004,
- ODM_FA_STATISTICS = 0x00000008,
- ODM_RAMASK = 0x00000010,
- ODM_RSSI_MONITOR = 0x00000020,
- ODM_SW_ANTDIV = 0x00000040,
- ODM_HW_ANTDIV = 0x00000080,
- ODM_BB_PWRSV = 0x00000100,
- ODM_2TPATHDIV = 0x00000200,
- ODM_1TPATHDIV = 0x00000400,
- ODM_PSD2AFH = 0x00000800
-};
-
/* 2011/10/20 MH Define Common info enum for all team. */
enum odm_common_info_def {
@@ -137,19 +121,6 @@ enum odm_ability_def {
# define ODM_ITRF_USB 0x2
-/* ODM_CMNINFO_OP_MODE */
-enum odm_operation_mode {
- ODM_NO_LINK = BIT(0),
- ODM_LINK = BIT(1),
- ODM_SCAN = BIT(2),
- ODM_POWERSAVE = BIT(3),
- ODM_AP_MODE = BIT(4),
- ODM_CLIENT_MODE = BIT(5),
- ODM_AD_HOC = BIT(6),
- ODM_WIFI_DIRECT = BIT(7),
- ODM_WIFI_DISPLAY = BIT(8),
-};
-
/* ODM_CMNINFO_WM_MODE */
enum odm_wireless_mode {
ODM_WM_UNKNOW = 0x0,
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index fca8f3d116c2..f1a703643e74 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -77,10 +77,6 @@ void *rtw_malloc2d(int h, int w, int size);
spin_lock_init(&((q)->lock)); \
} while (0)
-u32 rtw_systime_to_ms(u32 systime);
-u32 rtw_ms_to_systime(u32 ms);
-s32 rtw_get_passing_time_ms(u32 start);
-
void rtw_usleep_os(int us);
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
@@ -94,49 +90,6 @@ static inline void flush_signals_thread(void)
flush_signals(current);
}
-#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
-
-static inline u32 _RND4(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
- return val;
-}
-
-static inline u32 _RND8(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
- return val;
-}
-
-static inline u32 _RND128(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
- return val;
-}
-
-static inline u32 _RND256(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
- return val;
-}
-
-static inline u32 _RND512(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
- return val;
-}
-
struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index 82cb4f7f4d3e..d2a069d4e1cc 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -160,9 +160,6 @@ struct hal_data_8188e {
u8 AntDivCfg;
u8 TRxAntDivType;
- u8 bDumpRxPkt;/* for debug */
- u8 bDumpTxPkt;/* for debug */
-
u8 OutEpQueueSel;
u8 OutEpNumber;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index edae053e350e..ef42c4b2f20c 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -998,13 +998,9 @@ Current IOREG MAP
#define STOP_BCNQ BIT(6)
/* 2 ACMHWCTRL */
-#define AcmHw_HwEn BIT(0)
-#define AcmHw_BeqEn BIT(1)
-#define AcmHw_ViqEn BIT(2)
-#define AcmHw_VoqEn BIT(3)
-#define AcmHw_BeqStatus BIT(4)
-#define AcmHw_ViqStatus BIT(5)
-#define AcmHw_VoqStatus BIT(6)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
/* 0x0600h ~ 0x07FFh WMAC Configuration */
/* 2APSD_CTRL */
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
deleted file mode 100644
index 01a7d987d6cc..000000000000
--- a/drivers/staging/r8188eu/include/rtw_debug.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define _drv_always_ 1
-#define _drv_emerg_ 2
-#define _drv_alert_ 3
-#define _drv_crit_ 4
-#define _drv_err_ 5
-#define _drv_warning_ 6
-#define _drv_notice_ 7
-#define _drv_info_ 8
-#define _drv_debug_ 9
-
-#define _module_rtl871x_xmit_c_ BIT(0)
-#define _module_xmit_osdep_c_ BIT(1)
-#define _module_rtl871x_recv_c_ BIT(2)
-#define _module_recv_osdep_c_ BIT(3)
-#define _module_rtl871x_mlme_c_ BIT(4)
-#define _module_mlme_osdep_c_ BIT(5)
-#define _module_rtl871x_sta_mgt_c_ BIT(6)
-#define _module_rtl871x_cmd_c_ BIT(7)
-#define _module_cmd_osdep_c_ BIT(8)
-#define _module_rtl871x_io_c_ BIT(9)
-#define _module_io_osdep_c_ BIT(10)
-#define _module_os_intfs_c_ BIT(11)
-#define _module_rtl871x_security_c_ BIT(12)
-#define _module_rtl871x_eeprom_c_ BIT(13)
-#define _module_hal_init_c_ BIT(14)
-#define _module_hci_hal_init_c_ BIT(15)
-#define _module_rtl871x_ioctl_c_ BIT(16)
-#define _module_rtl871x_ioctl_set_c_ BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_ BIT(19)
-#define _module_hci_intfs_c_ BIT(20)
-#define _module_hci_ops_c_ BIT(21)
-#define _module_osdep_service_c_ BIT(22)
-#define _module_mp_ BIT(23)
-#define _module_hci_ops_os_c_ BIT(24)
-#define _module_rtl871x_ioctl_os_c BIT(25)
-#define _module_rtl8712_cmd_c_ BIT(26)
-#define _module_rtl8192c_xmit_c_ BIT(27)
-#define _module_hal_xmit_c_ BIT(28)
-#define _module_efuse_ BIT(29)
-#define _module_rtl8712_recv_c_ BIT(30)
-#define _module_rtl8712_led_c_ BIT(31)
-
-#define DRIVER_PREFIX "R8188EU: "
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index 3e8d3bb48903..d8d48ace356c 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -11,10 +11,7 @@
struct eeprom_priv {
u8 bautoload_fail_flag;
- u8 bloadfile_fail_flag;
- u8 bloadmac_fail_flag;
u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
- u16 channel_plan;
u8 EepromOrEfuse;
u8 efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4);
};
diff --git a/drivers/staging/r8188eu/include/rtw_fw.h b/drivers/staging/r8188eu/include/rtw_fw.h
index c4b1a8370b4a..8f74157ee9ac 100644
--- a/drivers/staging/r8188eu/include/rtw_fw.h
+++ b/drivers/staging/r8188eu/include/rtw_fw.h
@@ -4,6 +4,11 @@
#ifndef __RTW_FW_H__
#define __RTW_FW_H__
+struct rt_firmware {
+ u8 *data;
+ u32 size;
+};
+
#include "drv_types.h"
int rtl8188e_firmware_download(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl.h b/drivers/staging/r8188eu/include/rtw_ioctl.h
index a36bd7313755..c704f3040ac8 100644
--- a/drivers/staging/r8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/r8188eu/include/rtw_ioctl.h
@@ -7,86 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#ifndef OID_802_11_CAPABILITY
- #define OID_802_11_CAPABILITY 0x0d010122
-#endif
-
-#ifndef OID_802_11_PMKID
- #define OID_802_11_PMKID 0x0d010123
-#endif
-
-/* For DDK-defined OIDs */
-#define OID_NDIS_SEG1 0x00010100
-#define OID_NDIS_SEG2 0x00010200
-#define OID_NDIS_SEG3 0x00020100
-#define OID_NDIS_SEG4 0x01010100
-#define OID_NDIS_SEG5 0x01020100
-#define OID_NDIS_SEG6 0x01020200
-#define OID_NDIS_SEG7 0xFD010100
-#define OID_NDIS_SEG8 0x0D010100
-#define OID_NDIS_SEG9 0x0D010200
-#define OID_NDIS_SEG10 0x0D020200
-
-#define SZ_OID_NDIS_SEG1 23
-#define SZ_OID_NDIS_SEG2 3
-#define SZ_OID_NDIS_SEG3 6
-#define SZ_OID_NDIS_SEG4 6
-#define SZ_OID_NDIS_SEG5 4
-#define SZ_OID_NDIS_SEG6 8
-#define SZ_OID_NDIS_SEG7 7
-#define SZ_OID_NDIS_SEG8 36
-#define SZ_OID_NDIS_SEG9 24
-#define SZ_OID_NDIS_SEG10 19
-
-/* For Realtek-defined OIDs */
-#define OID_MP_SEG1 0xFF871100
-#define OID_MP_SEG2 0xFF818000
-
-#define OID_MP_SEG3 0xFF818700
-#define OID_MP_SEG4 0xFF011100
-
-enum oid_type {
- QUERY_OID,
- SET_OID
-};
-
-struct oid_funs_node {
- unsigned int oid_start; /* the starting number for OID */
- unsigned int oid_end; /* the ending number for OID */
- struct oid_obj_priv *node_array;
- unsigned int array_sz; /* the size of node_array */
- int query_counter; /* count the number of query hits for this segment */
- int set_counter; /* count the number of set hits for this segment */
-};
-
-struct oid_par_priv {
- void *adapter_context;
- NDIS_OID oid;
- void *information_buf;
- u32 information_buf_len;
- u32 *bytes_rw;
- u32 *bytes_needed;
- enum oid_type type_of_oid;
- u32 dbg;
-};
-
-struct oid_obj_priv {
- unsigned char dbg; /* 0: without OID debug message
- * 1: with OID debug message */
- int (*oidfuns)(struct oid_par_priv *poid_par_priv);
-};
-
extern struct iw_handler_def rtw_handlers_def;
-
-int drv_query_info(struct net_device *miniportadaptercontext, NDIS_OID oid,
- void *informationbuffer, u32 informationbufferlength,
- u32 *byteswritten, u32 *bytesneeded);
-
-int drv_set_info(struct net_device *MiniportAdapterContext,
- NDIS_OID oid, void *informationbuffer,
- u32 informationbufferlength, u32 *bytesread,
- u32 *bytesneeded);
-
extern int ui_pid[3];
#endif /* #ifndef __INC_CEINFO_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index 42d850f9d777..d81668498e46 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -363,8 +363,6 @@ struct mlme_priv {
u8 *assoc_req;
u32 assoc_req_len;
- u8 *assoc_rsp;
- u32 assoc_rsp_len;
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
* in 802.11g BSS) */
@@ -558,13 +556,9 @@ void rtw_scan_timeout_handler(struct adapter *adapter);
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
#define rtw_set_scan_deny(adapter, ms) do {} while (0)
-int _rtw_init_mlme_priv(struct adapter *padapter);
-
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
-
- struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
void _rtw_free_network(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork, u8 isfreeall);
@@ -596,7 +590,10 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
void rtw_set_roaming(struct adapter *adapter, u8 to_roaming);
u8 rtw_to_roaming(struct adapter *adapter);
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid);
void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
u32 mstatus);
+u8 rtw_current_antenna(struct adapter *adapter);
+
#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 0c555ea6719b..573d65b175cc 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -24,36 +24,12 @@
#define REAUTH_LIMIT (4)
#define REASSOC_LIMIT (4)
-#define READDBA_LIMIT (2)
-
-#define ROAMING_LIMIT 8
#define DYNAMIC_FUNC_DISABLE (0x0)
/* ====== ODM_ABILITY_E ======== */
/* BB ODM section BIT 0-15 */
#define DYNAMIC_BB_DIG BIT(0)
-#define DYNAMIC_BB_RA_MASK BIT(1)
-#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
-#define DYNAMIC_BB_BB_FA_CNT BIT(3)
-
-#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
-#define DYNAMIC_BB_CCK_PD BIT(5)
-#define DYNAMIC_BB_ANT_DIV BIT(6)
-#define DYNAMIC_BB_PWR_SAVE BIT(7)
-#define DYNAMIC_BB_PWR_TRA BIT(8)
-#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
-#define DYNAMIC_BB_PATH_DIV BIT(10)
-#define DYNAMIC_BB_PSD BIT(11)
-
-/* MAC DM section BIT 16-23 */
-#define DYNAMIC_MAC_EDCA_TURBO BIT(16)
-#define DYNAMIC_MAC_EARLY_MODE BIT(17)
-
-/* RF ODM section BIT 24-31 */
-#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
-#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
-#define DYNAMIC_RF_CALIBRATION BIT(26)
#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
@@ -208,17 +184,7 @@ enum SCAN_STATE {
SCAN_STATE_MAX,
};
-struct mlme_handler {
- unsigned int num;
- char *str;
- unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
-
-struct action_handler {
- unsigned int num;
- char* str;
- unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
+typedef unsigned int (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame);
struct ss_res {
int state;
@@ -419,7 +385,7 @@ struct mlme_ext_priv {
u8 active_keep_alive_check;
};
-int init_mlme_ext_priv(struct adapter *adapter);
+void init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
@@ -434,7 +400,6 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
void Save_DM_Func_Flag(struct adapter *padapter);
void Restore_DM_Func_Flag(struct adapter *padapter);
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
void Set_MSR(struct adapter *padapter, u8 type);
@@ -563,6 +528,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
unsigned char action, unsigned short status);
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
unsigned int send_beacon(struct adapter *padapter);
+bool get_beacon_valid_bit(struct adapter *adapter);
+void clear_beacon_valid_bit(struct adapter *adapter);
void start_clnt_assoc(struct adapter *padapter);
void start_clnt_auth(struct adapter *padapter);
@@ -594,20 +561,10 @@ unsigned int OnDeAuth(struct adapter *padapter,
unsigned int OnAction(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int on_action_spct(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_qos(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_dls(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnAction_back(struct adapter *padapter,
struct recv_frame *precv_frame);
unsigned int on_action_public(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int OnAction_ht(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_wmm(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnAction_p2p(struct adapter *padapter,
struct recv_frame *precv_frame);
@@ -635,8 +592,6 @@ void addba_timer_hdl(struct sta_info *psta);
bool cckrates_included(unsigned char *rate, int ratelen);
bool cckratesonly_included(unsigned char *rate, int ratelen);
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
-
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
@@ -769,9 +724,6 @@ struct C2HEvent_Header {
unsigned int rsvd;
};
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
-
enum rtw_c2h_event {
GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
GEN_EVT_CODE(_Read_BBREG),
@@ -806,7 +758,7 @@ enum rtw_c2h_event {
#ifdef _RTW_MLME_EXT_C_
static struct fwevent wlanevents[] = {
- {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL}, /*0*/
{0, NULL},
{0, NULL},
{0, NULL},
@@ -820,12 +772,12 @@ static struct fwevent wlanevents[] = {
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
{sizeof(struct stadel_event), &rtw_stadel_event_callback},
{0, NULL},
- {0, rtw_dummy_event_callback},
+ {0, NULL},
{0, NULL}, /*15*/
{0, NULL},
{0, NULL},
{0, NULL},
- {0, rtw_fwdbg_event_callback},
+ {0, NULL},
{0, NULL}, /*20*/
{0, NULL},
{0, NULL},
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index 7c3cb895c3cd..6e9fdd66fad1 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -47,16 +47,8 @@ struct pwrctrl_priv {
u8 smart_ps;
u8 bcn_ant_mode;
- u32 alives;
- struct work_struct cpwm_event;
bool bpower_saving;
- u8 reg_rfoff;
- u8 reg_pdnmode; /* powerdown mode */
-
- /* RF OFF Level */
- u32 cur_ps_level;
- u32 reg_rfps_level;
uint ips_enter_cnts;
uint ips_leave_cnts;
@@ -64,7 +56,7 @@ struct pwrctrl_priv {
u8 ips_mode_req; /* used to accept the mode setting request,
* will update to ipsmode later */
uint bips_processing;
- u32 ips_deny_time; /* will deny IPS when system time less than this */
+ unsigned long ips_deny_time; /* will deny IPS when system time less than this */
u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
u8 bLeisurePs;
@@ -72,21 +64,15 @@ struct pwrctrl_priv {
u8 power_mgnt;
u8 bFwCurrentInPSMode;
u32 DelayLPSLastTimeStamp;
- s32 pnp_current_pwr_state;
- u8 pnp_bstop_trx;
u8 bInSuspend;
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
int pwr_state_check_interval;
- u8 pwr_state_check_cnts;
-
- int ps_flag;
enum rt_rf_power_state rf_pwrstate;/* cur power state */
enum rt_rf_power_state change_rfpwrstate;
- u8 wepkeymask;
u8 bkeepfwalive;
};
@@ -109,6 +95,7 @@ struct pwrctrl_priv {
void rtw_init_pwrctrl_priv(struct adapter *adapter);
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
void LeaveAllPowerSaveMode(struct adapter *adapter);
@@ -117,14 +104,10 @@ int ips_leave(struct adapter *padapter);
void rtw_ps_processor(struct adapter *padapter);
-s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
void LPS_Enter(struct adapter *adapter);
void LPS_Leave(struct adapter *adapter);
-int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
- const char *caller);
-#define rtw_pwr_wakeup(adapter) \
- _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+int rtw_pwr_wakeup(struct adapter *adapter);
int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
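Note on the ips_deny_time change above: the field now holds a jiffies value, so it is typed unsigned long like jiffies itself. A minimal sketch of the usual jiffies deadline pattern, with illustrative names that are not the driver's actual code:

	#include <linux/jiffies.h>

	/* Illustrative only: a "deny IPS until" window expressed in jiffies. */
	struct ips_ctrl {
		unsigned long deny_until;	/* absolute jiffies deadline */
	};

	static void ips_defer(struct ips_ctrl *c, unsigned int ms)
	{
		/* Push the deadline ms milliseconds into the future. */
		c->deny_until = jiffies + msecs_to_jiffies(ms);
	}

	static bool ips_allowed(struct ips_ctrl *c)
	{
		/*
		 * time_after() handles jiffies wrap-around correctly,
		 * which an open-coded u32 comparison would not.
		 */
		return time_after(jiffies, c->deny_until);
	}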
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 4ac4e6b3e177..66d240a7123d 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -80,7 +80,6 @@ struct rx_pkt_attrib {
u8 drvinfo_sz;
u8 shift_sz;
u8 hdrlen; /* the WLAN Header Len */
- u8 to_fr_ds;
u8 amsdu;
bool qos;
u8 priority;
@@ -167,7 +166,6 @@ struct recv_priv {
uint rx_largepacket_crcerr;
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
- struct semaphore allrxreturnevt;
u8 rx_pending_cnt;
struct tasklet_struct recv_tasklet;
@@ -230,7 +228,6 @@ struct recv_buf {
struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
- struct sk_buff *pkt_newalloc;
struct adapter *adapter;
u8 fragcnt;
int frame_tag;
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index b2df1480d66b..034a9f8f51c9 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -198,7 +198,7 @@ struct xmit_buf {
u32 len;
struct submit_ctx *sctx;
u32 ff_hwaddr;
- struct urb *pxmit_urb[8];
+ struct urb *pxmit_urb;
dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
u8 bpending[8];
int last[8];
@@ -341,7 +341,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+int rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/r8188eu/include/sta_info.h b/drivers/staging/r8188eu/include/sta_info.h
index b7e6b1f319a2..4112c837bcef 100644
--- a/drivers/staging/r8188eu/include/sta_info.h
+++ b/drivers/staging/r8188eu/include/sta_info.h
@@ -48,7 +48,6 @@ struct stainfo_stats {
u64 rx_ctrl_pkts;
u64 rx_data_pkts;
- u64 last_rx_mgnt_pkts;
u64 last_rx_beacon_pkts;
u64 last_rx_probereq_pkts;
u64 last_rx_probersp_pkts;
@@ -230,7 +229,6 @@ struct sta_info {
#define sta_update_last_rx_pkts(sta) \
do { \
- sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
diff --git a/drivers/staging/r8188eu/include/usb_ops.h b/drivers/staging/r8188eu/include/usb_ops.h
index 14526fcff4ae..ddc46cb44358 100644
--- a/drivers/staging/r8188eu/include/usb_ops.h
+++ b/drivers/staging/r8188eu/include/usb_ops.h
@@ -25,15 +25,14 @@
* @return true:
* @return false:
*/
-static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+static inline bool rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
{
- int ret = false;
- int value;
- value = atomic_inc_return(&dvobj->continual_urb_error);
+ int value = atomic_inc_return(&dvobj->continual_urb_error);
+
if (value > MAX_CONTINUAL_URB_ERR)
- ret = true;
+ return true;
- return ret;
+ return false;
}
/*
@@ -47,19 +46,14 @@ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
#define USB_HIGH_SPEED_BULK_SIZE 512
#define USB_FULL_SPEED_BULK_SIZE 64
-static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
- int buf_len)
+static inline bool rtw_usb_bulk_size_boundary(struct adapter *padapter, int buf_len)
{
- u8 rst = true;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
if (pdvobjpriv->pusbdev->speed == USB_SPEED_HIGH)
- rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
- true : false;
+ return buf_len % USB_HIGH_SPEED_BULK_SIZE == 0;
else
- rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
- true : false;
- return rst;
+ return buf_len % USB_FULL_SPEED_BULK_SIZE == 0;
}
#endif /* __USB_OPS_H_ */
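For reference, a minimal sketch of the consecutive-error counter idiom that rtw_inc_and_chk_continual_urb_error() relies on, using the kernel atomic API; the struct name and threshold below are illustrative, not taken from the driver:

	#include <linux/atomic.h>
	#include <linux/types.h>

	#define MAX_CONT_ERR	4	/* illustrative threshold */

	struct err_track {
		atomic_t cont_err;
	};

	/* Returns true once too many consecutive errors have been seen. */
	static bool track_error(struct err_track *t)
	{
		return atomic_inc_return(&t->cont_err) > MAX_CONT_ERR;
	}

	/* Call on any successful transfer to restart the count. */
	static void track_ok(struct err_track *t)
	{
		atomic_set(&t->cont_err, 0);
	}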
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
index 3e777ca52745..f271e93e9ab9 100644
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ b/drivers/staging/r8188eu/include/usb_osintf.h
@@ -6,16 +6,12 @@
#include "osdep_service.h"
#include "drv_types.h"
-#include "usb_vendor_req.h"
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
#define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
-u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
- enum rt_usb_wvalue wvalue, u8 windex, void *data,
- u8 datalen, u8 isdirectionin);
void netdev_br_init(struct net_device *netdev);
void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
diff --git a/drivers/staging/r8188eu/include/usb_vendor_req.h b/drivers/staging/r8188eu/include/usb_vendor_req.h
deleted file mode 100644
index 7337b1b7419f..000000000000
--- a/drivers/staging/r8188eu/include/usb_vendor_req.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _USB_VENDOR_REQUEST_H_
-#define _USB_VENDOR_REQUEST_H_
-
-/* 4 Set/Get Register related wIndex/Data */
-#define RT_USB_RESET_MASK_OFF 0
-#define RT_USB_RESET_MASK_ON 1
-#define RT_USB_SLEEP_MASK_OFF 0
-#define RT_USB_SLEEP_MASK_ON 1
-#define RT_USB_LDO_ON 1
-#define RT_USB_LDO_OFF 0
-
-/* 4 Set/Get SYSCLK related wValue or Data */
-#define RT_USB_SYSCLK_32KHZ 0
-#define RT_USB_SYSCLK_40MHZ 1
-#define RT_USB_SYSCLK_60MHZ 2
-
-enum bt_usb_request {
- RT_USB_SET_REGISTER = 1,
- RT_USB_SET_SYSCLK = 2,
- RT_USB_GET_SYSCLK = 3,
- RT_USB_GET_REGISTER = 4
-};
-
-enum rt_usb_wvalue {
- RT_USB_RESET_MASK = 1,
- RT_USB_SLEEP_MASK = 2,
- RT_USB_USB_HRCPWM = 3,
- RT_USB_LDO = 4,
- RT_USB_BOOT_TYPE = 5
-};
-
-#endif
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
index c331be19ff83..0254310bdf44 100644
--- a/drivers/staging/r8188eu/include/wifi.h
+++ b/drivers/staging/r8188eu/include/wifi.h
@@ -4,25 +4,14 @@
#ifndef _WIFI_H_
#define _WIFI_H_
+#include <linux/bits.h>
#include <linux/ieee80211.h>
-#ifdef BIT
-/* error "BIT define occurred earlier elsewhere!\n" */
-#undef BIT
-#endif
-#define BIT(x) (1 << (x))
-
#define WLAN_ETHHDR_LEN 14
#define WLAN_HDR_A3_LEN 24
#define WLAN_HDR_A3_QOS_LEN 26
#define WLAN_SSID_MAXLEN 32
-enum WIFI_FRAME_TYPE {
- WIFI_CTRL_TYPE = (BIT(2)),
- WIFI_DATA_TYPE = (BIT(3)),
- WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
-};
-
enum WIFI_FRAME_SUBTYPE {
/* below is for mgt frame */
WIFI_ASSOCREQ = (0 | IEEE80211_FTYPE_MGMT),
@@ -39,24 +28,15 @@ enum WIFI_FRAME_SUBTYPE {
WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | IEEE80211_FTYPE_MGMT),
/* below is for control frame */
- WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
- WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
- WIFI_CTRL_TYPE),
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | IEEE80211_FTYPE_CTL),
/* below is for data frame */
- WIFI_DATA = (0 | WIFI_DATA_TYPE),
- WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
- WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
- WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
- WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+ WIFI_DATA = (0 | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFACK = (BIT(4) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFPOLL = (BIT(5) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_NULL = (BIT(6) | IEEE80211_FTYPE_DATA),
+ WIFI_QOS_DATA_NULL = (BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA),
};
enum WIFI_REASON_CODE {
@@ -172,8 +152,6 @@ enum WIFI_REG_DOMAIN {
#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
-#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
-
#define SetMFrag(pbuf) \
*(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
@@ -209,12 +187,6 @@ enum WIFI_REG_DOMAIN {
*(__le16 *)(pbuf) |= cpu_to_le16(type); \
} while (0)
-#define GetSequence(pbuf) \
- (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
-
-#define GetFragNum(pbuf) \
- (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
-
#define GetTupleCache(pbuf) \
(cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
@@ -239,8 +211,6 @@ enum WIFI_REG_DOMAIN {
#define SetPriority(pbuf, tid) \
*(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
-#define GetPriority(pbuf) ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
-
#define SetEOSP(pbuf, eosp) \
*(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
@@ -254,8 +224,6 @@ enum WIFI_REG_DOMAIN {
#define SetAMsdu(pbuf, amsdu) \
*(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
-#define GetAid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
-
#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \
(((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \
30 : 24))) & 0x000f)
@@ -270,10 +238,7 @@ enum WIFI_REG_DOMAIN {
static inline bool IS_MCAST(unsigned char *da)
{
- if ((*da) & 0x01)
- return true;
- else
- return false;
+ return (*da) & 0x01;
}
static inline unsigned char *get_da(unsigned char *pframe)
@@ -345,13 +310,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
return sa;
}
-static inline bool IsFrameTypeCtrl(unsigned char *pframe)
-{
- if (WIFI_CTRL_TYPE == GetFrameType(pframe))
- return true;
- else
- return false;
-}
/*-----------------------------------------------------------------------------
Below is for the security related definition
------------------------------------------------------------------------------*/
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index eb9375b0c660..1b09462ca908 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -4,7 +4,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/wlan_bssdef.h"
-#include "../include/rtw_debug.h"
#include "../include/wifi.h"
#include "../include/rtw_mlme.h"
#include "../include/rtw_mlme_ext.h"
@@ -1131,9 +1130,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
sec_len = *(pos++); len -= 1;
- if (sec_len > 0 && sec_len <= len) {
+ if (sec_len > 0 &&
+ sec_len <= len &&
+ sec_len <= 32) {
ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
ssid_index++;
}
pos += sec_len;
@@ -1886,88 +1887,6 @@ static int rtw_wx_get_nick(struct net_device *dev,
return 0;
}
-static int rtw_wx_read32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter;
- struct iw_point *p;
- u16 len;
- u32 addr;
- u32 data32;
- u32 bytes;
- u8 *ptmp;
- int ret;
-
- padapter = (struct adapter *)rtw_netdev_priv(dev);
- p = &wrqu->data;
- len = p->length;
- ptmp = memdup_user(p->pointer, len);
- if (IS_ERR(ptmp))
- return PTR_ERR(ptmp);
-
- bytes = 0;
- addr = 0;
- sscanf(ptmp, "%d,%x", &bytes, &addr);
-
- switch (bytes) {
- case 1:
- data32 = rtw_read8(padapter, addr);
- sprintf(extra, "0x%02X", data32);
- break;
- case 2:
- data32 = rtw_read16(padapter, addr);
- sprintf(extra, "0x%04X", data32);
- break;
- case 4:
- data32 = rtw_read32(padapter, addr);
- sprintf(extra, "0x%08X", data32);
- break;
- default:
- ret = -EINVAL;
- goto err_free_ptmp;
- }
-
- kfree(ptmp);
- return 0;
-
-err_free_ptmp:
- kfree(ptmp);
- return ret;
-}
-
-static int rtw_wx_write32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- u32 addr;
- u32 data32;
- u32 bytes;
-
- bytes = 0;
- addr = 0;
- data32 = 0;
- sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
-
- switch (bytes) {
- case 1:
- rtw_write8(padapter, addr, (u8)data32);
- break;
- case 2:
- rtw_write16(padapter, addr, (u16)data32);
- break;
- case 4:
- rtw_write32(padapter, addr, data32);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rtw_wx_read_rf(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -2363,114 +2282,6 @@ static void rtw_p2p_setDN(struct net_device *dev,
pwdinfo->device_name_len = wrqu->data.length - 1;
}
-static void rtw_p2p_get_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Commented by Albert 2010/10/12 */
- /* Because of the output size limitation, I had removed the "Role" information. */
- /* About the "Role" information, we will use the new private IOCTL to get the "Role" information. */
- sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo));
- wrqu->data.length = strlen(extra);
-}
-
-/* Commented by Albert 20110520 */
-/* This function will return the config method description */
-/* This config method description will show us which config method the remote P2P device is intended to use */
-/* by sending the provisioning discovery request frame. */
-
-static void rtw_p2p_get_req_cm(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_role(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo));
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_ifaddr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\nMAC %pM",
- pwdinfo->p2p_peer_interface_addr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n%pM",
- pwdinfo->rx_prov_disc_info.peerDevAddr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr_by_invitation(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\nMAC %pM",
- pwdinfo->p2p_peer_device_addr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_groupid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X %s",
- pwdinfo->groupid_info.go_device_addr[0], pwdinfo->groupid_info.go_device_addr[1],
- pwdinfo->groupid_info.go_device_addr[2], pwdinfo->groupid_info.go_device_addr[3],
- pwdinfo->groupid_info.go_device_addr[4], pwdinfo->groupid_info.go_device_addr[5],
- pwdinfo->groupid_info.ssid);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_op_ch(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel);
- wrqu->data.length = strlen(extra);
-}
-
static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3229,32 +3040,6 @@ static int rtw_p2p_set(struct net_device *dev,
return ret;
}
-static int rtw_p2p_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (!memcmp(wrqu->data.pointer, "status", 6)) {
- rtw_p2p_get_status(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
- rtw_p2p_get_role(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
- rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
- rtw_p2p_get_req_cm(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
- /* Get the P2P device address when receiving the provision discovery request frame. */
- rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
- rtw_p2p_get_groupid(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
- /* Get the P2P device address when receiving the P2P Invitation request frame. */
- rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
- rtw_p2p_get_op_ch(dev, info, wrqu, extra);
- }
- return 0;
-}
-
static int rtw_p2p_get2(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3389,6 +3174,34 @@ static void rf_reg_dump(struct adapter *padapter)
}
}
+static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ switch (dm_func) {
+ case 0:
+ /* disable all dynamic func */
+ odmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ break;
+ case 1:
+ /* disable DIG */
+ odmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+ break;
+ case 6:
+ /* turn on all dynamic func */
+ if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+ struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+ digtable->CurIGValue = rtw_read8(adapter, 0xc50);
+ }
+ odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ break;
+ default:
+ break;
+ }
+}
+
static int rtw_dbg_port(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3620,9 +3433,7 @@ static int rtw_dbg_port(struct net_device *dev,
break;
case 0x06:
{
- u32 ODMFlag;
- GetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
- ODMFlag = (u32)(0x0f & arg);
+ u32 ODMFlag = (u32)(0x0f & arg);
SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
}
break;
@@ -3632,13 +3443,6 @@ static int rtw_dbg_port(struct net_device *dev,
break;
case 0x09:
break;
- case 0x0c:/* dump rx/tx packet */
- if (arg == 0) {
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_RXPKT, &extra_arg);
- } else if (arg == 1) {
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_TXPKT, &extra_arg);
- }
- break;
case 0x15:
break;
case 0x10:/* driver version display */
@@ -3683,23 +3487,14 @@ static int rtw_dbg_port(struct net_device *dev,
rf_reg_dump(padapter);
break;
case 0xee:/* turn on/off dynamic funcs */
- {
- u32 odm_flag;
-
- if (0xf == extra_arg) {
- GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
- } else {
- /* extra_arg = 0 - disable all dynamic func
- extra_arg = 1 - disable DIG
- extra_arg = 2 - disable tx power tracking
- extra_arg = 3 - turn on all dynamic func
- */
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &extra_arg);
- GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
- }
+ if (extra_arg != 0xf) {
+ /* extra_arg = 0 - disable all dynamic func
+ * extra_arg = 1 - disable DIG
+ * extra_arg = 6 - turn on all dynamic func
+ */
+ rtw_set_dynamic_functions(padapter, extra_arg);
}
break;
-
case 0xfd:
rtw_write8(padapter, 0xc50, arg);
rtw_write8(padapter, 0xc58, arg);
@@ -3895,8 +3690,8 @@ static const struct iw_priv_args rtw_private_args[] = {
};
static iw_handler rtw_private_handler[] = {
-rtw_wx_write32, /* 0x00 */
-rtw_wx_read32, /* 0x01 */
+ NULL, /* 0x00 */
+ NULL, /* 0x01 */
NULL, /* 0x02 */
NULL, /* 0x03 */
/* for MM DTV platform */
@@ -3919,7 +3714,7 @@ NULL, /* 0x03 */
NULL, /* 0x0F */
rtw_p2p_set, /* 0x10 */
- rtw_p2p_get, /* 0x11 */
+ NULL, /* 0x11 */
rtw_p2p_get2, /* 0x12 */
NULL, /* 0x13 */
@@ -3958,10 +3753,10 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
struct iw_handler_def rtw_handlers_def = {
.standard = rtw_handlers,
- .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+ .num_standard = ARRAY_SIZE(rtw_handlers),
.private = rtw_private_handler,
.private_args = (struct iw_priv_args *)rtw_private_args,
- .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
.get_wireless_stats = rtw_get_wireless_stats,
};
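The handler tables above are now sized with ARRAY_SIZE(), which expands to sizeof(array) / sizeof(array[0]) and is guarded against being applied to a plain pointer. A small stand-alone illustration with a hypothetical table:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const int example_table[] = { 1, 2, 3, 5, 8 };

	static size_t example_count(void)
	{
		/*
		 * Same result as sizeof(example_table) / sizeof(example_table[0]),
		 * but the macro rejects accidental use on a pointer argument.
		 */
		return ARRAY_SIZE(example_table);
	}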
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
index 72ad9700130e..899d8e9c3834 100644
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
@@ -66,7 +66,6 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* We have to backup the PMK information for WiFi PMK Caching test item. */
/* Backup the btkip_countermeasure information. */
/* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memset(&backup_pmkid[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
backup_index = adapter->securitypriv.PMKIDIndex;
backup_counter = adapter->securitypriv.btkip_countermeasure;
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 550721eef681..891c85b088ca 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -441,7 +441,6 @@ static void rtw_init_default_value(struct adapter *padapter)
u8 rtw_reset_drv_sw(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
/* hal_priv */
rtl8188eu_init_default_value(padapter);
@@ -457,8 +456,6 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
- pwrctrlpriv->pwr_state_check_cnts = 0;
-
/* mlmeextpriv */
padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
@@ -490,10 +487,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
reset_global_wifidirect_info(padapter);
- if (init_mlme_ext_priv(padapter) == _FAIL) {
- dev_err(dvobj_to_dev(padapter->dvobj), "init_mlme_ext_priv failed\n");
- goto free_mlme_priv;
- }
+ init_mlme_ext_priv(padapter);
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_xmit_priv failed\n");
@@ -534,7 +528,6 @@ free_xmit_priv:
free_mlme_ext:
free_mlme_ext_priv(&padapter->mlmeextpriv);
-free_mlme_priv:
rtw_free_mlme_priv(&padapter->mlmepriv);
free_evt_priv:
@@ -632,12 +625,6 @@ int _netdev_open(struct net_device *pnetdev)
{
uint status;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (pwrctrlpriv->ps_flag) {
- padapter->net_closed = false;
- goto netdev_open_normal_process;
- }
if (!padapter->bup) {
padapter->bDriverStopped = false;
@@ -681,7 +668,6 @@ int _netdev_open(struct net_device *pnetdev)
netdev_br_init(pnetdev);
-netdev_open_normal_process:
return 0;
netdev_open_error:
@@ -750,9 +736,36 @@ void rtw_ips_pwr_down(struct adapter *padapter)
padapter->bCardDisableWOHSM = false;
}
+static void rtw_fifo_cleanup(struct adapter *adapter)
+{
+ struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv;
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(adapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ adapter->xmitpriv.nqos_ssn = rtw_read16(adapter, REG_NQOS_SEQ);
+
+ if (!pwrpriv->bkeepfwalive) {
+ /* RX DMA stop */
+ rtw_write32(adapter, REG_RXPKT_NUM,
+ (rtw_read32(adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
+ do {
+ if (!(rtw_read32(adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
+ break;
+ } while (trycnt--);
+
+ /* RQPN Load 0 */
+ rtw_write16(adapter, REG_RQPN_NPQ, 0x0);
+ rtw_write32(adapter, REG_RQPN, 0x80000000);
+ mdelay(10);
+ }
+}
+
void rtw_ips_dev_unload(struct adapter *padapter)
{
- SetHwReg8188EU(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+ rtw_fifo_cleanup(padapter);
if (padapter->intf_stop)
padapter->intf_stop(padapter);
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 7a6fcc96081a..812acd59be79 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -42,22 +42,6 @@ Otherwise, there will be racing condition.
Caller must check if the list is empty before calling rtw_list_delete
*/
-inline u32 rtw_systime_to_ms(u32 systime)
-{
- return systime * 1000 / HZ;
-}
-
-inline u32 rtw_ms_to_systime(u32 ms)
-{
- return ms * HZ / 1000;
-}
-
-/* the input parameter start use the same unit as jiffies */
-inline s32 rtw_get_passing_time_ms(u32 start)
-{
- return rtw_systime_to_ms(jiffies - start);
-}
-
void rtw_usleep_os(int us)
{
if (1 < (us / 1000))
@@ -116,19 +100,10 @@ void rtw_free_netdev(struct net_device *netdev)
{
struct rtw_netdev_priv_indicator *pnpi;
- if (!netdev)
- goto RETURN;
-
pnpi = netdev_priv(netdev);
- if (!pnpi->priv)
- goto RETURN;
-
vfree(pnpi->priv);
free_netdev(netdev);
-
-RETURN:
- return;
}
int rtw_change_ifname(struct adapter *padapter, const char *ifname)
@@ -220,7 +195,7 @@ keep_ori:
*/
inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
{
- return (cbuf->write == cbuf->read) ? true : false;
+ return cbuf->write == cbuf->read;
}
/**
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index ffd727fb32e3..68869c5daeff 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -8,7 +8,6 @@
#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
#include "../include/osdep_intf.h"
-#include "../include/usb_vendor_req.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
#include "../include/rtw_ioctl.h"
@@ -200,8 +199,6 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- int ret = 0;
-
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved))
goto exit;
@@ -240,7 +237,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_indicate_disconnect(padapter);
exit:
- return ret;
+ return 0;
}
static int rtw_resume(struct usb_interface *pusb_intf)
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index c4b6dbc8d66d..0269e602b217 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -106,8 +106,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
- if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
- (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
goto exit;
}
@@ -141,7 +140,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
- purb = pxmitbuf->pxmit_urb[0];
+ purb = pxmitbuf->pxmit_urb;
/* translate DMA FIFO addr to pipehandle */
pipe = ffaddr2pipehdl(pdvobj, addr);
@@ -179,25 +178,21 @@ exit:
void rtw_write_port_cancel(struct adapter *padapter)
{
- int i, j;
+ int i;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
padapter->bWritePortCancel = true;
for (i = 0; i < NR_XMITBUFF; i++) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
+ if (pxmitbuf->pxmit_urb)
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
+ if (pxmitbuf->pxmit_urb)
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
}
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
index a6012cffd37e..e430c64e9068 100644
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/r8188eu/os_dep/xmit_linux.c
@@ -67,8 +67,6 @@ bool rtw_endofpktfile(struct pkt_file *pfile)
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
- int i;
-
pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
if (!pxmitbuf->pallocated_buf)
return _FAIL;
@@ -76,21 +74,17 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
pxmitbuf->dma_transfer_addr = 0;
- for (i = 0; i < 8; i++) {
- pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb[i])
- return _FAIL;
- }
+ pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxmitbuf->pxmit_urb)
+ return _FAIL;
+
return _SUCCESS;
}
void rtw_os_xmit_resource_free(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 free_sz)
{
- int i;
-
- for (i = 0; i < 8; i++)
- usb_free_urb(pxmitbuf->pxmit_urb[i]);
+ usb_free_urb(pxmitbuf->pxmit_urb);
kfree(pxmitbuf->pallocated_buf);
}
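A minimal sketch of the one-URB-per-buffer lifecycle the simplified xmit code follows, using the standard USB core calls; the structure and function names are illustrative:

	#include <linux/usb.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct tx_buf {
		struct urb *urb;
		u8 *data;
	};

	static int tx_buf_init(struct tx_buf *buf, size_t len)
	{
		buf->data = kzalloc(len, GFP_KERNEL);
		if (!buf->data)
			return -ENOMEM;

		buf->urb = usb_alloc_urb(0, GFP_KERNEL);	/* no ISO packets */
		if (!buf->urb) {
			kfree(buf->data);
			return -ENOMEM;
		}
		return 0;
	}

	static void tx_buf_fini(struct tx_buf *buf)
	{
		usb_kill_urb(buf->urb);		/* cancel if still in flight */
		usb_free_urb(buf->urb);		/* NULL-safe */
		kfree(buf->data);
	}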
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 52eeb56c5c76..4abec7b42993 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -185,10 +185,10 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
for (index = 0; index < 6; index++) {
writeVal = (u32)(priv->MCSTxPowerLevelOriginalOffset[index] +
((index < 2) ? powerBase0 : powerBase1));
- byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00)>>8);
- byte2 = (u8)((writeVal & 0x7f0000)>>16);
- byte3 = (u8)((writeVal & 0x7f000000)>>24);
+ byte0 = writeVal & 0x7f;
+ byte1 = (writeVal & 0x7f00) >> 8;
+ byte2 = (writeVal & 0x7f0000) >> 16;
+ byte3 = (writeVal & 0x7f000000) >> 24;
if (byte0 > 0x24)
byte0 = 0x24;
if (byte1 > 0x24)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index c5e44bbe997c..cd8bbc358d01 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -58,7 +58,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
memset(pTxFwInfo, 0x12, 8);
} else {
- tcb_desc->txbuf_size = (u16)frag_length;
+ tcb_desc->txbuf_size = frag_length;
}
seg_ptr = skb_put(skb, frag_length);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 7f9dee42a04d..4b9249195b5a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -221,7 +221,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
&priv->rtllib->current_network.qos_data.parameters;
u8 pAcParam = *val;
u32 eACI = pAcParam;
- union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
+ union aci_aifsn *pAciAifsn = (union aci_aifsn *)&
(qos_parameters->aifs[0]);
u8 acm = pAciAifsn->f.acm;
u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
@@ -320,8 +320,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
usValue = rtl92e_eeprom_read(dev,
- (u16)(EEPROM_Customer_ID>>1)) >> 8;
- priv->eeprom_CustomerID = (u8)(usValue & 0xff);
+ (EEPROM_Customer_ID >> 1)) >> 8;
+ priv->eeprom_CustomerID = usValue & 0xff;
usValue = rtl92e_eeprom_read(dev,
EEPROM_ICVersion_ChannelPlan>>1);
priv->eeprom_ChannelPlan = usValue&0xff;
@@ -399,9 +399,9 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->EEPROMLegacyHTTxPowerDiff);
if (!priv->AutoloadFailFlag)
- priv->EEPROMThermalMeter = (u8)(((rtl92e_eeprom_read(dev,
+ priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
(EEPROM_ThermalMeter>>1))) &
- 0xff00)>>8);
+ 0xff00) >> 8;
else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
@@ -413,8 +413,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = rtl92e_eeprom_read(dev,
EEPROM_TxPwDiff_CrystalCap >> 1);
priv->EEPROMAntPwDiff = usValue & 0x0fff;
- priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
- >> 12);
+ priv->EEPROMCrystalCap = (usValue & 0xf000)
+ >> 12;
} else {
priv->EEPROMAntPwDiff =
EEPROM_Default_AntTxPowerDiff;
@@ -811,7 +811,7 @@ start:
rtl92e_config_mac(dev);
- if (priv->card_8192_version > (u8) VERSION_8190_BD) {
+ if (priv->card_8192_version > VERSION_8190_BD) {
rtl92e_get_tx_power(dev);
rtl92e_set_tx_power(dev, priv->chan);
}
@@ -894,9 +894,8 @@ start:
for (i = 0; i < TxBBGainTableLength; i++) {
if (tmpRegA == dm_tx_bb_gain[i]) {
- priv->rfa_txpowertrackingindex = (u8)i;
- priv->rfa_txpowertrackingindex_real =
- (u8)i;
+ priv->rfa_txpowertrackingindex = i;
+ priv->rfa_txpowertrackingindex_real = i;
priv->rfa_txpowertracking_default =
priv->rfa_txpowertrackingindex;
break;
@@ -908,7 +907,7 @@ start:
for (i = 0; i < CCKTxBBGainTableLength; i++) {
if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
- priv->CCKPresentAttentuation_20Mdefault = (u8)i;
+ priv->CCKPresentAttentuation_20Mdefault = i;
break;
}
}
@@ -1176,7 +1175,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
- pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->data_rate);
+ pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc);
@@ -1195,7 +1194,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
pTxFwInfo->RtsHT = (cb_desc->rts_rate&0x80) ? 1 : 0;
- pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->rts_rate);
+ pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw(cb_desc->rts_rate);
pTxFwInfo->RtsBandwidth = 0;
pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
@@ -1226,7 +1225,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->LINIP = 0;
pdesc->CmdInit = 1;
pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
- pdesc->PktSize = (u16)skb->len-sizeof(struct tx_fwinfo_8190pci);
+ pdesc->PktSize = skb->len - sizeof(struct tx_fwinfo_8190pci);
pdesc->SecCAMID = 0;
pdesc->RATid = cb_desc->RATRIndex;
@@ -1299,11 +1298,10 @@ void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
- entry_tmp->PktSize = (u16)(cb_desc->pkt_size +
- entry_tmp->Offset);
+ entry_tmp->PktSize = cb_desc->pkt_size + entry_tmp->Offset;
entry_tmp->QueueSelect = QSLT_CMD;
entry_tmp->TxFWInfoSize = 0x08;
- entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
+ entry_tmp->RATid = DESC_PACKET_TYPE_INIT;
}
entry->TxBufferSize = skb->len;
entry->TxBuffAddr = mapping;
@@ -1613,9 +1611,8 @@ static void _rtl92e_query_rxphystatus(
total_rssi += RSSI;
if (bpacket_match_bssid) {
- pstats->RxMIMOSignalStrength[i] = (u8) RSSI;
- precord_stats->RxMIMOSignalStrength[i] =
- (u8) RSSI;
+ pstats->RxMIMOSignalStrength[i] = RSSI;
+ precord_stats->RxMIMOSignalStrength[i] = RSSI;
}
}
@@ -1661,14 +1658,14 @@ static void _rtl92e_query_rxphystatus(
if (is_cck_rate) {
pstats->SignalStrength = precord_stats->SignalStrength =
- (u8)(_rtl92e_signal_scale_mapping(priv,
- (long)pwdb_all));
+ _rtl92e_signal_scale_mapping(priv,
+ (long)pwdb_all);
} else {
if (rf_rx_num != 0)
pstats->SignalStrength = precord_stats->SignalStrength =
- (u8)(_rtl92e_signal_scale_mapping(priv,
- (long)(total_rssi /= rf_rx_num)));
+ _rtl92e_signal_scale_mapping(priv,
+ (long)(total_rssi /= rf_rx_num));
}
}
@@ -1709,8 +1706,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
slide_rssi_index = 0;
tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
- priv->stats.signal_strength = rtl92e_translate_to_dbm(priv,
- (u8)tmp_val);
+ priv->stats.signal_strength = rtl92e_translate_to_dbm(priv, tmp_val);
curr_st->rssi = priv->stats.signal_strength;
if (!prev_st->bPacketMatchBSSID) {
if (!prev_st->bToSelfBA)
@@ -2036,7 +2032,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
- (u8)pDrvInfo->RxRate);
+ pDrvInfo->RxRate);
stats->bShortPreamble = pDrvInfo->SPLCP;
_rtl92e_update_received_rate_histogram_stats(dev, stats);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9b025b9fa7ab..38110fa4f36d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -34,8 +34,7 @@ static bool _rtl92e_fw_boot_cpu(struct net_device *dev)
netdev_dbg(dev, "Download Firmware: Put code ok!\n");
CPU_status = rtl92e_readl(dev, CPU_GEN);
- rtl92e_writeb(dev, CPU_GEN,
- (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+ rtl92e_writeb(dev, CPU_GEN, (CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff);
mdelay(1);
if (!_rtl92e_wait_for_fw(dev, CPU_GEN_BOOT_RDY, 200)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 411138102948..f92551094738 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -919,7 +919,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
continue;
switch (CurrentCmd->CmdID) {
case CmdID_SetTxPowerLevel:
- if (priv->IC_Cut > (u8)VERSION_8190_BD)
+ if (priv->IC_Cut > VERSION_8190_BD)
_rtl92e_set_tx_power_level(dev,
channel);
break;
@@ -929,11 +929,11 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
break;
case CmdID_WritePortUshort:
rtl92e_writew(dev, CurrentCmd->Para1,
- (u16)CurrentCmd->Para2);
+ CurrentCmd->Para2);
break;
case CmdID_WritePortUchar:
rtl92e_writeb(dev, CurrentCmd->Para1,
- (u8)CurrentCmd->Para2);
+ CurrentCmd->Para2);
break;
case CmdID_RF_WriteReg:
for (eRFPath = 0; eRFPath <
@@ -1299,17 +1299,17 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
DIG_ALGO_BY_FALSE_ALARM)
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
priv->initgain_backup.xaagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
+ BitMask);
priv->initgain_backup.xbagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
+ BitMask);
priv->initgain_backup.xcagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
+ BitMask);
priv->initgain_backup.xdagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
+ BitMask);
BitMask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
rCCK0_CCA, BitMask);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 756d8db51937..d58800d06e8f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -633,7 +633,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
- powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
+ powerlevelOFDM24G = priv->Pwr_Track >> 24;
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
@@ -833,7 +833,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
bMaskDWord);
for (i = 0; i < OFDM_Table_Length; i++) {
if (tmpRegA == OFDMSwingTable[i]) {
- priv->OFDM_index[0] = (u8)i;
+ priv->OFDM_index[0] = i;
RT_TRACE(COMP_POWER_TRACKING,
"Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
rOFDM0_XATxIQImbalance, tmpRegA,
@@ -844,7 +844,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
for (i = 0; i < CCK_Table_length; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
- priv->CCK_index = (u8) i;
+ priv->CCK_index = i;
RT_TRACE(COMP_POWER_TRACKING,
"Initial reg0x%x = 0x%x, CCK_index = 0x%x\n",
rCCK0_TxFilter1, TempCCk,
@@ -1041,7 +1041,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
{
u32 TempVal;
struct r8192_priv *priv = rtllib_priv(dev);
- u8 attenuation = (u8)priv->CCKPresentAttentuation;
+ u8 attenuation = priv->CCKPresentAttentuation;
TempVal = 0;
if (!bInCH14) {
@@ -1245,10 +1245,10 @@ void rtl92e_dm_backup_state(struct net_device *dev)
return;
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
- priv->initgain_backup.xaagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
- priv->initgain_backup.xbagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
- priv->initgain_backup.xcagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
- priv->initgain_backup.xdagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
+ priv->initgain_backup.xaagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
+ priv->initgain_backup.xbagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
+ priv->initgain_backup.xcagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
+ priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
@@ -1535,7 +1535,7 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
|| !initialized || force_write) {
- initial_gain = (u8)dm_digtable.cur_ig_value;
+ initial_gain = dm_digtable.cur_ig_value;
rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
@@ -2513,5 +2513,5 @@ static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- rtl92e_writeb(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+ rtl92e_writeb(dev, DRIVER_RSSI, priv->undecorated_smoothed_pwdb);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 97afea4c3511..7d04966afdd9 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -238,7 +238,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
skb->data, skb->len);
#endif
- req = (struct rtllib_hdr_3addr *) skb->data;
+ req = (struct rtllib_hdr_3addr *)skb->data;
tag = (u8 *)req;
dst = (u8 *)(&req->addr2[0]);
tag += sizeof(struct rtllib_hdr_3addr);
@@ -343,7 +343,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
goto OnADDBARsp_Reject;
}
-
if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
(u8)(pBaParamSet->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
@@ -355,7 +354,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pPendingBA = &pTS->TxPendingBARecord;
pAdmittedBA = &pTS->TxAdmittedBARecord;
-
if (pAdmittedBA->b_valid) {
netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
__func__);
@@ -374,7 +372,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
DeActivateBAEntry(ieee, pPendingBA);
}
-
if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
pTS->bAddBaReqDelayed = true;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c985e4ebc545..0ecd81a81866 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1585,7 +1585,7 @@ struct rtllib_device {
short sta_sleep;
int ps_timeout;
int ps_period;
- struct tasklet_struct ps_task;
+ struct work_struct ps_task;
u64 ps_time;
bool polling;
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index ed968c01c7ff..a8d22da8bc9a 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -103,7 +103,7 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
if (a4_included)
aad_len += 6;
if (qc_included) {
- pos = (u8 *) &hdr->addr4;
+ pos = (u8 *)&hdr->addr4;
if (a4_included)
pos += 6;
qc = *pos & 0x0f;
@@ -130,13 +130,13 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
* A4 (if present)
* QC (if present)
*/
- pos = (u8 *) hdr;
+ pos = (u8 *)hdr;
aad[0] = pos[0] & 0x8f;
aad[1] = pos[1] & 0xc7;
memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
- pos = (u8 *) &hdr->seq_ctl;
+ pos = (u8 *)&hdr->seq_ctl;
aad[20] = pos[0] & 0x0f;
aad[21] = 0; /* all bits masked */
memset(aad + 22, 0, 8);
@@ -186,7 +186,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
struct aead_request *req;
struct scatterlist sg[2];
@@ -235,7 +235,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 4a760ecbc31e..8bc95651e384 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -136,7 +136,7 @@ static inline u16 Hi16(u32 val)
static inline u16 Mk16(u8 hi, u8 lo)
{
- return lo | (((u16) hi) << 8);
+ return lo | (hi << 8);
}
@@ -220,7 +220,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
/* Make temporary area overlap WEP seed so that the final copy can be
* avoided on little endian hosts.
*/
- u16 *PPK = (u16 *) &WEPSeed[4];
+ u16 *PPK = (u16 *)&WEPSeed[4];
/* Step 1 - make copy of TTAK and bring in TSC */
PPK[0] = TTAK[0];
@@ -231,15 +231,15 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
PPK[5] = TTAK[4] + IV16;
/* Step 2 - 96-bit bijective mixing using S-box */
- PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
- PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
- PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
- PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
- PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
- PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
-
- PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
- PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+ PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *)&TK[0]));
+ PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *)&TK[2]));
+ PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *)&TK[4]));
+ PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *)&TK[6]));
+ PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *)&TK[8]));
+ PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *)&TK[10]));
+
+ PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *)&TK[12]));
+ PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *)&TK[14]));
PPK[2] += RotR1(PPK[1]);
PPK[3] += RotR1(PPK[2]);
PPK[4] += RotR1(PPK[3]);
@@ -251,7 +251,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
WEPSeed[0] = Hi8(IV16);
WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
WEPSeed[2] = Lo8(IV16);
- WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+ WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *)&TK[0])) >> 1);
#ifdef __BIG_ENDIAN
{
@@ -280,7 +280,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
if (!tkey->tx_phase1_done) {
@@ -357,7 +357,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
@@ -485,7 +485,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
struct rtllib_hdr_4addr *hdr11;
- hdr11 = (struct rtllib_hdr_4addr *) skb->data;
+ hdr11 = (struct rtllib_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
case RTLLIB_FCTL_TODS:
@@ -518,7 +518,7 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
u8 *pos;
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
netdev_dbg(skb->dev,
@@ -558,7 +558,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
ether_addr_copy(ev.src_addr.sa_data, hdr->addr2);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
@@ -568,7 +568,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
u8 mic[8];
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tkey->key_set)
return -1;
@@ -584,7 +584,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
netdev_dbg(skb->dev,
"Michael MIC verification failed for MSDU from %pM keyidx=%d\n",
hdr->addr2, keyidx);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index eb904b42f9c6..abe5c153f74e 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -250,7 +250,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
if (skb->len < 24)
return 0;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
@@ -299,7 +299,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -339,7 +339,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -936,7 +936,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
} else {
struct rx_ts_record *pRxTS = NULL;
- if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2,
+ if (GetTs(ieee, (struct ts_common_info **)&pRxTS, hdr->addr2,
(u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
if ((fc & (1<<11)) && (frag == pRxTS->rx_last_frag_num) &&
(WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num))
@@ -1100,7 +1100,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
int flen;
struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);
@@ -1152,7 +1152,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
* delivered, so remove skb from fragment cache
*/
skb = frag_skb;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
rtllib_frag_cache_invalidate(ieee, hdr);
}
@@ -1165,7 +1165,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
if (/*ieee->ieee802_1x &&*/
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
@@ -1397,13 +1397,13 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
goto rx_exit;
/* Get TS for Rx Reorder */
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
&& !is_multicast_ether_addr(hdr->addr1)
&& (!bToOtherSTA)) {
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID,
+ GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID,
RX_DIR, true);
if (TID != 0 && TID != 3)
ieee->bis_any_nonbepkts = true;
@@ -2053,7 +2053,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
}
network->ssid_len = min(info_element->len,
- (u8) IW_ESSID_MAX_SIZE);
+ (u8)IW_ESSID_MAX_SIZE);
memcpy(network->ssid, info_element->data,
network->ssid_len);
if (network->ssid_len < IW_ESSID_MAX_SIZE)
@@ -2721,7 +2721,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == RTLLIB_LINKED))
- tasklet_schedule(&ieee->ps_task);
+ schedule_work(&ieee->ps_task);
break;
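The power-save handler is now queued as a work item instead of a tasklet, so it runs in process context and may sleep. A minimal sketch of the workqueue pattern, with illustrative names rather than the rtllib ones:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct ps_ctx {
		struct work_struct ps_work;
		/* ... power-save state ... */
	};

	static void ps_work_fn(struct work_struct *work)
	{
		struct ps_ctx *ctx = container_of(work, struct ps_ctx, ps_work);

		/* Runs in process context, unlike a tasklet handler. */
		(void)ctx;
	}

	static void ps_ctx_init(struct ps_ctx *ctx)
	{
		INIT_WORK(&ctx->ps_work, ps_work_fn);
	}

	/* Safe to call from an interrupt-driven path such as the RX handler. */
	static void ps_kick(struct ps_ctx *ctx)
	{
		schedule_work(&ctx->ps_work);
	}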
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 4b6c2295a3cf..b5f4d35954a9 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -202,7 +202,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
@@ -279,7 +279,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
u16 fc, type, stype;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
@@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
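The hunk above (and the matching change in ieee80211_softmac.c further down) moves del_timer_sync() out of the beacon_lock critical section. del_timer_sync() waits for a running timer handler to finish, so calling it while holding a lock the handler may also take can deadlock, and waiting with interrupts disabled is undesirable in any case. A minimal sketch of the corrected ordering, with a placeholder device structure standing in for struct rtllib_device:

#include <linux/spinlock.h>
#include <linux/timer.h>

/* Placeholder; in the driver struct rtllib_device plays this role. */
struct beacon_dev {
        spinlock_t beacon_lock;
        int beacon_txing;
        struct timer_list beacon_timer;
};

static void beacons_stop(struct beacon_dev *dev)
{
        unsigned long flags;

        /* Only the flag update needs the lock. */
        spin_lock_irqsave(&dev->beacon_lock, flags);
        dev->beacon_txing = 0;
        spin_unlock_irqrestore(&dev->beacon_lock, flags);

        /*
         * Wait for a possibly running timer handler only after the lock
         * is dropped, so the handler is still free to take it.
         */
        del_timer_sync(&dev->beacon_timer);
}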
@@ -856,9 +856,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
if (ieee->pHTInfo->bCurrentHTSupport) {
- tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf = (u8 *) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_buf = (u8 *)&(ieee->pHTInfo->SelfHTInfo);
tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
&tmp_ht_cap_len, encrypt, false);
@@ -912,7 +912,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
- tag = (u8 *) beacon_buf->info_element[0].data;
+ tag = (u8 *)beacon_buf->info_element[0].data;
memcpy(tag, ssid, ssid_len);
@@ -1303,7 +1303,7 @@ rtllib_association_req(struct rtllib_network *beacon,
0x00};
struct octet_string osCcxRmCap;
- osCcxRmCap.Octet = (u8 *) CcxRmCapBuf;
+ osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
osCcxRmCap.Length = sizeof(CcxRmCapBuf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
@@ -1764,7 +1764,7 @@ static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
+static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
u8 **challenge, int *chlen)
{
struct rtllib_authentication *a;
@@ -1773,10 +1773,10 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
if (skb->len < (sizeof(struct rtllib_authentication) -
sizeof(struct rtllib_info_element))) {
netdev_dbg(dev, "invalid len in auth resp: %d\n", skb->len);
- return 0xcafe;
+ return -EINVAL;
}
*challenge = NULL;
- a = (struct rtllib_authentication *) skb->data;
+ a = (struct rtllib_authentication *)skb->data;
if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
t = skb->data + sizeof(struct rtllib_authentication);
@@ -1787,7 +1787,13 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
return -ENOMEM;
}
}
- return le16_to_cpu(a->status);
+
+ if (a->status) {
+ netdev_dbg(dev, "auth_parse() failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
@@ -1799,7 +1805,7 @@ static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
netdev_dbg(dev, "invalid len in auth request: %d\n", skb->len);
return -1;
}
- a = (struct rtllib_authentication *) skb->data;
+ a = (struct rtllib_authentication *)skb->data;
ether_addr_copy(dest, a->header.addr2);
@@ -1817,7 +1823,7 @@ static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
u8 *ssid = NULL;
u8 ssidlen = 0;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
bool bssid_match;
if (skb->len < sizeof(struct rtllib_hdr_3addr))
@@ -1865,7 +1871,7 @@ static int assoc_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
return -1;
}
- a = (struct rtllib_assoc_request_frame *) skb->data;
+ a = (struct rtllib_assoc_request_frame *)skb->data;
ether_addr_copy(dest, a->header.addr2);
@@ -1884,7 +1890,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
return 0xcafe;
}
- response_head = (struct rtllib_assoc_response_frame *) skb->data;
+ response_head = (struct rtllib_assoc_response_frame *)skb->data;
*aid = le16_to_cpu(response_head->aid) & 0x3fff;
status_code = le16_to_cpu(response_head->status);
@@ -2042,13 +2048,15 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(struct tasklet_struct *t)
+static inline void rtllib_sta_ps(struct work_struct *work)
{
- struct rtllib_device *ieee = from_tasklet(ieee, t, ps_task);
+ struct rtllib_device *ieee;
u64 time;
short sleep;
unsigned long flags, flags2;
+ ieee = container_of(work, struct rtllib_device, ps_task);
+
spin_lock_irqsave(&ieee->lock, flags);
if ((ieee->ps == RTLLIB_PS_DISABLED ||
@@ -2167,7 +2175,7 @@ EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee,
struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);
u8 category = 0;
@@ -2206,7 +2214,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
int aid;
u8 *ies;
struct rtllib_assoc_response_frame *assoc_resp;
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl = le16_to_cpu(header->frame_ctl);
netdev_dbg(ieee->dev, "received [RE]ASSOCIATION RESPONSE (%d)\n",
@@ -2278,7 +2286,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
{
- u16 errcode;
+ int errcode;
u8 *challenge;
int chlen = 0;
bool bSupportNmode = true, bHalfSupportNmode = false;
@@ -2288,8 +2296,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
if (errcode) {
ieee->softmac_stats.rx_auth_rs_err++;
netdev_info(ieee->dev,
- "Authentication response status code 0x%x",
- errcode);
+ "Authentication response status code %d", errcode);
rtllib_associate_abort(ieee);
return;
}
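The auth_parse() and rtllib_rx_auth_resp() hunks above switch from returning the raw 16-bit status field (with 0xcafe as a magic "malformed frame" marker) to the usual kernel convention of 0 on success and a negative errno on failure, and the caller's errcode variable changes from u16 to int to match. The helper could already return -ENOMEM on a failed allocation, which a u16 silently truncates; a small host-side demonstration of that truncation:

#include <stdio.h>

int main(void)
{
        int err = -12;                     /* -ENOMEM, as auth_parse() may return */
        unsigned short old_errcode = err;  /* the old caller stored it in a u16 */

        printf("as int: %d, truncated to u16: 0x%x\n", err, old_errcode);
        /* prints: as int: -12, truncated to u16: 0xfff4 */
        return 0;
}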
@@ -2351,7 +2358,7 @@ rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
static inline int
rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl;
if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
@@ -2391,7 +2398,7 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct rtllib_rx_stats *rx_stats, u16 type,
u16 stype)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl;
if (!ieee->proto_started)
@@ -2811,7 +2818,7 @@ static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
if (!skb)
return NULL;
- b = (struct rtllib_probe_response *) skb->data;
+ b = (struct rtllib_probe_response *)skb->data;
b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
return skb;
@@ -2827,7 +2834,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
if (!skb)
return NULL;
- b = (struct rtllib_probe_response *) skb->data;
+ b = (struct rtllib_probe_response *)skb->data;
b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -3028,7 +3035,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
+ INIT_WORK(&ieee->ps_task, rtllib_sta_ps);
return 0;
}
@@ -3050,8 +3057,8 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_work_sync(&ieee->associate_complete_wq);
cancel_work_sync(&ieee->ips_leave_wq);
cancel_work_sync(&ieee->wx_sync_scan_wq);
+ cancel_work_sync(&ieee->ps_task);
mutex_unlock(&ieee->wx_mutex);
- tasklet_kill(&ieee->ps_task);
}
static inline struct sk_buff *
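Taken together, the rtllib_rx.c and rtllib_softmac.c hunks convert the power-save handler from a tasklet to a work item: tasklet_setup() becomes INIT_WORK(), tasklet_schedule() becomes schedule_work(), tasklet_kill() becomes cancel_work_sync(), and the handler receives a struct work_struct pointer that container_of() maps back to the owning device. The usual motivation is that work items run in process context and may sleep, which softirq (tasklet) context forbids. A minimal self-contained sketch of the pattern, with placeholder names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ps_dev {
        struct work_struct ps_task;
        /* ... driver state ... */
};

static void ps_work_handler(struct work_struct *work)
{
        struct ps_dev *dev = container_of(work, struct ps_dev, ps_task);

        /* Runs in process context; sleeping is allowed here. */
        (void)dev;
}

static void ps_dev_init(struct ps_dev *dev)
{
        INIT_WORK(&dev->ps_task, ps_work_handler);      /* was tasklet_setup() */
}

static void ps_dev_kick(struct ps_dev *dev)
{
        schedule_work(&dev->ps_task);                   /* was tasklet_schedule() */
}

static void ps_dev_teardown(struct ps_dev *dev)
{
        cancel_work_sync(&dev->ps_task);                /* was tasklet_kill() */
}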
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 57a6d1130b6a..70a62ca0f69a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -41,8 +41,8 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
/* if setting by freq convert to channel */
if (fwrq->e == 1) {
- if ((fwrq->m >= (int) 2.412e8 &&
- fwrq->m <= (int) 2.487e8)) {
+ if ((fwrq->m >= (int)2.412e8 &&
+ fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 0d67d5880377..cf9a240924f2 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -660,7 +660,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
{
u8 i = 0;
bool deauth = false;
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
if (ieee->state != RTLLIB_LINKED)
return -ENOLINK;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 68c0bf9a191a..b577f9c81f85 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1790,7 +1790,7 @@ struct ieee80211_device {
short sta_sleep;
int ps_timeout;
int ps_period;
- struct tasklet_struct ps_task;
+ struct work_struct ps_task;
u32 ps_th;
u32 ps_tl;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 101c28265e91..f17d07dad56d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -362,7 +362,7 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_ccmp_data *data = priv;
if (len < CCMP_TK_LEN)
- return -1;
+ return 0;
if (!data->key_set)
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 689d8843f538..7b120b8cb982 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -637,7 +637,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
if (len < TKIP_KEY_LEN)
- return -1;
+ return 0;
if (!tkey->key_set)
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 8a51ea1dd6e5..a2cdf3bfd1a4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -201,7 +201,7 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
struct prism2_wep_data *wep = priv;
if (len < wep->key_len)
- return -1;
+ return 0;
memcpy(key, wep->key, wep->key_len);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1a43979939a8..92001cb36730 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
@@ -1461,13 +1461,13 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
+static inline int auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
- return 0xcafe;
+ return -EINVAL;
}
*challenge = NULL;
a = (struct ieee80211_authentication *)skb->data;
@@ -1482,7 +1482,12 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
}
}
- return le16_to_cpu(a->status);
+ if (a->status) {
+ IEEE80211_DEBUG_MGMT("auth_parse() failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
@@ -1687,14 +1692,15 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(struct tasklet_struct *t)
+static inline void ieee80211_sta_ps(struct work_struct *work)
{
- struct ieee80211_device *ieee = from_tasklet(ieee, t, ps_task);
+ struct ieee80211_device *ieee;
u32 th, tl;
short sleep;
-
unsigned long flags, flags2;
+ ieee = container_of(work, struct ieee80211_device, ps_task);
+
spin_lock_irqsave(&ieee->lock, flags);
if ((ieee->ps == IEEE80211_PS_DISABLED ||
@@ -1826,7 +1832,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
{
/* default support N mode, disable halfNmode */
bool bSupportNmode = true, bHalfSupportNmode = false;
- u16 errcode;
+ int errcode;
u8 *challenge;
int chlen = 0;
u32 iotAction;
@@ -1875,7 +1881,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
}
} else {
ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Auth response status code 0x%x", errcode);
+ IEEE80211_DEBUG_MGMT("Auth response status code %d\n", errcode);
ieee80211_associate_abort(ieee);
}
}
@@ -1897,7 +1903,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == IEEE80211_LINKED))
- tasklet_schedule(&ieee->ps_task);
+ schedule_work(&ieee->ps_task);
if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
@@ -2602,7 +2608,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_setup(&ieee->ps_task, ieee80211_sta_ps);
+ INIT_WORK(&ieee->ps_task, ieee80211_sta_ps);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
@@ -2613,7 +2619,7 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
-
+ cancel_work_sync(&ieee->ps_task);
mutex_unlock(&ieee->wx_mutex);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 78cc8f357bbc..d6829cf6f7e3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -470,7 +470,9 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
return 0;
}
len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
- erq->length = (len >= 0 ? len : 0);
+ if (len < 0)
+ len = 0;
+ erq->length = len;
erq->flags |= IW_ENCODE_ENABLED;
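The get_key changes in the ccmp/tkip/wep backends above, plus this guard in ieee80211_wx_get_encode(), converge on one contract: a get_key() callback returns the number of key bytes copied and never a negative value, since callers store the result in unsigned length fields such as erq->length. A sketch of that convention for a hypothetical backend:

#include <string.h>

/* Return bytes copied; 0 means "no key data available for this buffer". */
static int example_get_key(void *dst, int dst_len,
                           const void *key, int key_len)
{
        if (dst_len < key_len)
                return 0;               /* buffer too small: nothing copied */
        memcpy(dst, key, key_len);
        return key_len;
}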
@@ -686,9 +688,9 @@ int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
} else {
if (strcmp(crypt->ops->name, "WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
+ else if (strcmp(crypt->ops->name, "TKIP") == 0)
ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
+ else if (strcmp(crypt->ops->name, "CCMP") == 0)
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
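The last hunk fixes an inverted test: strcmp() returns 0 on a match, so the old `else if (strcmp(crypt->ops->name, "TKIP"))` was true for every name except "TKIP". In practice TKIP was reported as CCMP and everything else, CCMP included, as TKIP. A small host-side demonstration:

#include <stdio.h>
#include <string.h>

static const char *alg_buggy(const char *name)
{
        if (strcmp(name, "WEP") == 0)
                return "WEP";
        else if (strcmp(name, "TKIP"))          /* true for anything != "TKIP" */
                return "TKIP";
        else if (strcmp(name, "CCMP"))
                return "CCMP";
        return "unknown";
}

static const char *alg_fixed(const char *name)
{
        if (strcmp(name, "WEP") == 0)
                return "WEP";
        else if (strcmp(name, "TKIP") == 0)
                return "TKIP";
        else if (strcmp(name, "CCMP") == 0)
                return "CCMP";
        return "unknown";
}

int main(void)
{
        printf("buggy(\"CCMP\") reports %s\n", alg_buggy("CCMP"));  /* TKIP */
        printf("fixed(\"CCMP\") reports %s\n", alg_fixed("CCMP"));  /* CCMP */
        return 0;
}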
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index dba3f2db9f48..a93f09033d9d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -480,7 +480,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
}
memset(posHTCap, 0, *len);
if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
- u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
+ static const u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
pCapELE = (struct ht_capability_ele *)&posHTCap[4];
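Marking the four-byte EWC OUI table static const (the queue-to-pipe map in r8192U_core.c below gets the same treatment) puts the data in .rodata once instead of typically rebuilding it on the stack on every call, and lets the compiler reject accidental writes. A minimal illustration:

#include <string.h>

void fill_ewc_oui(unsigned char *dst)
{
        /* One read-only copy, shared by every call. */
        static const unsigned char ewc_oui[] = { 0x00, 0x90, 0x4c, 0x33 };

        memcpy(dst, ewc_oui, sizeof(ewc_oui));
}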
@@ -940,10 +940,8 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
} else {
- if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
+ pHTInfo->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
+ HT_AGG_SIZE_32K);
}
}
@@ -951,10 +949,9 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
* <2> Set AMPDU Minimum MPDU Start Spacing
* 802.11n 3.0 section 9.7d.3
*/
- if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
+ pHTInfo->CurrentMPDUDensity = max_t(u32, pHTInfo->MPDU_Density,
+ pPeerHTCap->MPDUDensity);
+
if (ieee->pairwise_key_type != KEY_TYPE_NA)
pHTInfo->CurrentMPDUDensity = 7; // 8us
// Force TX AMSDU
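The two HTOnAssocRsp() hunks replace open-coded if/else clamping with min_t() and max_t(). The _t variants take an explicit type, so both operands are converted before the comparison, which avoids signed/unsigned surprises when the peer's bitfields and the local settings have different widths. A sketch of the idiom, with illustrative helper names:

#include <linux/minmax.h>
#include <linux/types.h>

/* A-MPDU factor: never exceed what the local side supports. */
static u32 pick_ampdu_factor(u32 peer_factor, u32 local_limit)
{
        return min_t(u32, peer_factor, local_limit);
}

/* MPDU density: honour the stricter (larger) of the two requirements. */
static u32 pick_mpdu_density(u32 local_density, u32 peer_density)
{
        return max_t(u32, local_density, peer_density);
}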
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index ce807c9d4219..2ca925f35830 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2537,7 +2537,7 @@ static short rtl8192_init(struct net_device *dev)
}
#else
{
- const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+ static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index a44d04effc8b..76ac798642bd 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -157,12 +157,11 @@ struct _adapter {
struct iw_statistics iwstats;
int pid; /*process id from UI*/
struct work_struct wk_filter_rx_ff0;
- u8 blnEnableRxFF0Filter;
- spinlock_t lock_rx_ff0_filter;
const struct firmware *fw;
struct usb_interface *pusb_intf;
struct mutex mutex_start;
struct completion rtl8712_fw_ready;
+ struct completion rx_filter_ready;
};
static inline u8 *myid(struct eeprom_priv *peepriv)
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index f926809b1021..7d8f1a29d18a 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -162,13 +162,13 @@ int r8712_generate_ie(struct registry_priv *registrypriv)
uint sz = 0;
struct wlan_bssid_ex *dev_network = &registrypriv->dev_network;
u8 *ie = dev_network->IEs;
- u16 beaconPeriod = (u16)dev_network->Configuration.BeaconPeriod;
+ u16 beacon_period = (u16)dev_network->Configuration.BeaconPeriod;
/*timestamp will be inserted by hardware*/
sz += 8;
ie += sz;
/*beacon interval : 2bytes*/
- *(__le16 *)ie = cpu_to_le16(beaconPeriod);
+ *(__le16 *)ie = cpu_to_le16(beacon_period);
sz += 2;
ie += 2;
/*capability info*/
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index d15d52c0d1a7..003e97205124 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
r8712_free_mlme_priv(&padapter->mlmepriv);
- r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
diff --git a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
index e125c7222ab5..68bdec07f51e 100644
--- a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
@@ -91,6 +91,5 @@
#define _BCNSPACE_MSK 0x0FFF
#define _BCNSPACE_SHT 0
-
#endif /* __RTL8712_CMDCTRL_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.h b/drivers/staging/rtl8712/rtl8712_efuse.h
index 4969d307e978..2e1ea9d7a295 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.h
+++ b/drivers/staging/rtl8712/rtl8712_efuse.h
@@ -15,8 +15,8 @@
#define GET_EFUSE_OFFSET(header) ((header & 0xF0) >> 4)
#define GET_EFUSE_WORD_EN(header) (header & 0x0F)
-#define MAKE_EFUSE_HEADER(offset, word_en) (((offset & 0x0F) << 4) | \
- (word_en & 0x0F))
+#define MAKE_EFUSE_HEADER(offset, word_en) ((((offset) & 0x0F) << 4) | \
+ ((word_en) & 0x0F))
/*--------------------------------------------------------------------------*/
struct PGPKT_STRUCT {
u8 offset;
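MAKE_EFUSE_HEADER() now parenthesizes its parameters. Nothing in this hunk shows a caller being bitten, so the change is defensive, but the old form breaks as soon as an argument contains an operator that binds more loosely than &. A contrived but runnable demonstration:

#include <stdio.h>

#define MAKE_EFUSE_HEADER_OLD(offset, word_en) \
        (((offset & 0x0F) << 4) | (word_en & 0x0F))

#define MAKE_EFUSE_HEADER_NEW(offset, word_en) \
        ((((offset) & 0x0F) << 4) | ((word_en) & 0x0F))

int main(void)
{
        int flags = 0x20;       /* caller keeps extra state in high bits */

        /* word_en supplied as an expression containing '|' */
        printf("old: 0x%x\n", MAKE_EFUSE_HEADER_OLD(0x5, flags | 0x3));
        printf("new: 0x%x\n", MAKE_EFUSE_HEADER_NEW(0x5, flags | 0x3));
        /* old: 0x73 (mask applied to 0x3 only), new: 0x53 */
        return 0;
}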
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
index 3d9f40fa8469..46d758d3f3a4 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
@@ -7,7 +7,6 @@
#ifndef __RTL8712_MACSETTING_BITDEF_H__
#define __RTL8712_MACSETTING_BITDEF_H__
-
/*MACID*/
/*BSSID*/
@@ -28,7 +27,5 @@
/*BUILDUSER*/
-
-
#endif /* __RTL8712_MACSETTING_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
index e8cb2eee9294..64740d99c252 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
@@ -16,7 +16,5 @@
#define BUILDTIME (RTL8712_MACIDSETTING_ + 0x0024)
#define BUILDUSER (RTL8712_MACIDSETTING_ + 0x0028)
-
-
#endif /*__RTL8712_MACSETTING_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
index a3eaee0e1b69..9ed5653f3f7f 100644
--- a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
@@ -39,6 +39,5 @@
#define MCS_TXAGC7 (RTL8712_RATECTRL_ + 0x67)
#define CCK_TXAGC (RTL8712_RATECTRL_ + 0x68)
-
#endif /*__RTL8712_RATECTRL_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 0ffb30f1af7e..7f1fdd058551 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -56,7 +56,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
list_add_tail(&precvbuf->list,
- &(precvpriv->free_recv_buf_queue.queue));
+ &precvpriv->free_recv_buf_queue.queue);
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
@@ -123,8 +123,8 @@ void r8712_free_recvframe(union recv_frame *precvframe,
precvframe->u.hdr.pkt = NULL;
}
spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
- list_del_init(&(precvframe->u.hdr.list));
- list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
+ list_del_init(&precvframe->u.hdr.list);
+ list_add_tail(&precvframe->u.hdr.list, &pfree_recv_queue->queue);
if (padapter) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
@@ -319,7 +319,7 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
struct rx_pkt_attrib *pattrib;
_pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
nr_subframes = 0;
pattrib = &prframe->u.hdr.attrib;
@@ -485,8 +485,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
else
break;
}
- list_del_init(&(prframe->u.hdr.list));
- list_add_tail(&(prframe->u.hdr.list), plist);
+ list_del_init(&prframe->u.hdr.list);
+ list_add_tail(&prframe->u.hdr.list, plist);
return true;
}
@@ -520,7 +520,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = plist->next;
- list_del_init(&(prframe->u.hdr.list));
+ list_del_init(&prframe->u.hdr.list);
if (SN_EQUAL(preorder_ctrl->indicate_seq,
pattrib->seq_num))
preorder_ctrl->indicate_seq =
@@ -980,7 +980,7 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
union recv_frame *precvframe = NULL;
struct recv_priv *precvpriv = &padapter->recvpriv;
- pfree_recv_queue = &(precvpriv->free_recv_queue);
+ pfree_recv_queue = &precvpriv->free_recv_queue;
pbuf = pskb->data;
prxstat = (struct recv_stat *)pbuf;
pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
diff --git a/drivers/staging/rtl8712/rtl8712_security_bitdef.h b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
index 1c26a7eca64a..44275ef455a0 100644
--- a/drivers/staging/rtl8712/rtl8712_security_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
@@ -30,6 +30,5 @@
#define _RXUSEDK BIT(1)
#define _TXUSEDK BIT(0)
-
#endif /*__RTL8712_SECURITY_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_spec.h b/drivers/staging/rtl8712/rtl8712_spec.h
index c0bab4c49ae9..613a410e5714 100644
--- a/drivers/staging/rtl8712/rtl8712_spec.h
+++ b/drivers/staging/rtl8712/rtl8712_spec.h
@@ -30,7 +30,6 @@
#define RTL8712_IOBASE_FF 0x10300000 /*IOBASE_FIFO 0x1031000~0x103AFFFF*/
-
/*IOREG Offset for 8712*/
#define RTL8712_SYSCFG_ RTL8712_IOBASE_IOREG
#define RTL8712_CMDCTRL_ (RTL8712_IOBASE_IOREG + 0x40)
@@ -47,7 +46,6 @@
#define RTL8712_DEBUGCTRL_ (RTL8712_IOBASE_IOREG + 0x310)
#define RTL8712_OFFLOAD_ (RTL8712_IOBASE_IOREG + 0x2D0)
-
/*FIFO for 8712*/
#define RTL8712_DMA_BCNQ (RTL8712_IOBASE_FF + 0x10000)
#define RTL8712_DMA_MGTQ (RTL8712_IOBASE_FF + 0x20000)
@@ -60,7 +58,6 @@
#define RTL8712_DMA_H2CCMD (RTL8712_IOBASE_FF + 0x90000)
#define RTL8712_DMA_C2HCMD (RTL8712_IOBASE_FF + 0xA0000)
-
/*------------------------------*/
/*BIT 16 15*/
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
index a328ca9b340c..d92df3fbd2b1 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
@@ -117,20 +117,17 @@
* Block's Bandgap.
*/
-
/*--------------------------------------------------------------------------*/
/* SPS1_CTRL bits (Offset 0x18-1E, 56bits)*/
/*--------------------------------------------------------------------------*/
#define SPS1_SWEN BIT(1) /* Enable vsps18 SW Macro Block.*/
#define SPS1_LDEN BIT(0) /* Enable VSPS12 LDO Macro block.*/
-
/*----------------------------------------------------------------------------*/
/* LDOA15_CTRL bits (Offset 0x20, 8bits)*/
/*----------------------------------------------------------------------------*/
#define LDA15_EN BIT(0) /* Enable LDOA15 Macro Block*/
-
/*----------------------------------------------------------------------------*/
/* 8192S LDOV12D_CTRL bit (Offset 0x21, 8bits)*/
/*----------------------------------------------------------------------------*/
@@ -140,7 +137,6 @@
/*CLK_PS_CTRL*/
#define _CLK_GATE_EN BIT(0)
-
/* EFUSE_CTRL*/
#define EF_FLAG BIT(31) /* Access Flag, Write:1;
* Read:0
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
index e95eb5832ec4..da5efcdedabe 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
@@ -14,7 +14,6 @@
#ifndef __RTL8712_SYSCFG_REGDEF_H__
#define __RTL8712_SYSCFG_REGDEF_H__
-
#define SYS_ISO_CTRL (RTL8712_SYSCFG_ + 0x0000)
#define SYS_FUNC_EN (RTL8712_SYSCFG_ + 0x0002)
#define PMC_FSM (RTL8712_SYSCFG_ + 0x0004)
@@ -39,6 +38,5 @@
#define RCLK_MON (RTL8712_SYSCFG_ + 0x003E)
#define EFUSE_CLK_CTRL (RTL8712_SYSCFG_ + 0x02F8)
-
#endif /*__RTL8712_SYSCFG_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
index 1af5f1dd3c20..d7bc9dd5cecd 100644
--- a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
@@ -45,6 +45,5 @@
/*BCNERRTH*/
/*MLT*/
-
#endif /* __RTL8712_TIMECTRL_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
index d3b45c6cd855..ea164e482347 100644
--- a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
@@ -45,6 +45,5 @@
#define _RPT_CNT_MSK 0x000FFFFF
#define _RPT_CNT_SHT 0
-
#endif /*__RTL8712_WMAC_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index acda930722b2..4be96df5a329 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -202,7 +202,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_SITE_SURVEY);
- padapter->blnEnableRxFF0Filter = 0;
+ complete(&padapter->rx_filter_ready);
return _SUCCESS;
}
@@ -536,7 +536,7 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
return;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 95e9ea5b2d98..8453d8de8248 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -66,7 +66,6 @@ struct evt_priv {
u8 *evt_buf; /*shall be non-paged, and 4 bytes aligned*/
u8 *evt_allocated_buf;
u32 evt_done_cnt;
- struct tasklet_struct event_tasklet;
};
#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
@@ -316,7 +315,6 @@ enum _RT_CHANNEL_DOMAIN {
RT_CHANNEL_DOMAIN_MAX,
};
-
struct SetChannelPlan_param {
enum _RT_CHANNEL_DOMAIN ChannelPlan;
};
@@ -338,7 +336,6 @@ struct getdatarate_rsp {
u8 datarates[NumRates];
};
-
/*
* Caller Mode: Any
* AP: AP can use the info for the contents of beacon frame
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index 634e67461712..d6332a8c7f4f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -13,7 +13,6 @@
#define OID_802_11_PMKID 0x0d010123
#endif
-
/* For DDK-defined OIDs*/
#define OID_NDIS_SEG1 0x00010100
#define OID_NDIS_SEG2 0x00010200
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 3b6926613257..36f6904d25ab 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -82,9 +82,9 @@ static inline void handle_pairwise_key(struct sta_info *psta,
(param->u.crypt. key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
memcpy(psta->tkiptxmickey. skey,
- &(param->u.crypt.key[16]), 8);
+ &param->u.crypt.key[16], 8);
memcpy(psta->tkiprxmickey. skey,
- &(param->u.crypt.key[24]), 8);
+ &param->u.crypt.key[24], 8);
padapter->securitypriv. busetkipkey = false;
mod_timer(&padapter->securitypriv.tkip_timer,
jiffies + msecs_to_jiffies(50));
@@ -600,7 +600,7 @@ static int r8711_wx_get_name(struct net_device *dev,
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u8 *prates;
@@ -659,8 +659,8 @@ static int r8711_wx_set_freq(struct net_device *dev,
/* If setting by frequency, convert to a channel */
if ((fwrq->e == 1) &&
- (fwrq->m >= (int) 2.412e8) &&
- (fwrq->m <= (int) 2.487e8)) {
+ (fwrq->m >= 241200000) &&
+ (fwrq->m <= 248700000)) {
int f = fwrq->m / 100000;
int c = 0;
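Both this handler and rtllib_wx_set_freq() earlier compare the wireless-extensions frequency against the 2.4 GHz band edges; here the `(int) 2.412e8` style double literals are replaced by the equivalent integer constants 241200000 and 248700000. The cast is folded at compile time either way, but integer literals keep floating-point constants out of kernel source and make the units explicit (tens of Hz, since wext encodes the frequency as m * 10^e and e == 1 here). The driver then walks its channel table; purely as an illustration of the same 2.4 GHz mapping, assuming the usual 5 MHz channel spacing:

#include <stdio.h>

static int freq_to_channel(int m)       /* m in tens of Hz, e == 1 */
{
        int mhz = m / 100000;           /* 241200000 -> 2412 MHz */

        if (mhz < 2412 || mhz > 2484)
                return -1;              /* outside the 2.4 GHz band */
        if (mhz == 2484)
                return 14;              /* channel 14 is offset differently */
        return (mhz - 2412) / 5 + 1;    /* channels 1..13, 5 MHz apart */
}

int main(void)
{
        printf("2412 MHz -> channel %d\n", freq_to_channel(241200000)); /* 1 */
        printf("2437 MHz -> channel %d\n", freq_to_channel(243700000)); /* 6 */
        return 0;
}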
@@ -1494,7 +1494,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
u32 keyindex_provided;
struct NDIS_802_11_WEP wep;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
struct _adapter *padapter = netdev_priv(dev);
key = erq->flags & IW_ENCODE_INDEX;
@@ -1589,8 +1589,8 @@ static int r8711_wx_get_enc(struct net_device *dev,
{
uint key;
struct _adapter *padapter = netdev_priv(dev);
- struct iw_point *erq = &(wrqu->encoding);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_point *erq = &wrqu->encoding;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
union Keytype *dk = padapter->securitypriv.DefKey;
if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -1670,7 +1670,7 @@ static int r871x_wx_set_auth(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
- struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ struct iw_param *param = (struct iw_param *)&wrqu->param;
int paramid;
int paramval;
int ret = 0;
@@ -1964,7 +1964,7 @@ static int r871x_get_ap_info(struct net_device *dev,
return -EINVAL;
data[32] = 0;
- spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
+ spin_lock_irqsave(&pmlmepriv->scanned_queue.lock, irqL);
phead = &queue->queue;
plist = phead->next;
while (1) {
@@ -1974,7 +1974,7 @@ static int r871x_get_ap_info(struct net_device *dev,
if (!mac_pton(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
- spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock),
+ spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock,
irqL);
return -EINVAL;
}
@@ -1996,7 +1996,7 @@ static int r871x_get_ap_info(struct net_device *dev,
}
plist = plist->next;
}
- spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock), irqL);
+ spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock, irqL);
if (pdata->length >= 34) {
if (copy_to_user((u8 __user *)pdata->pointer + 32,
(u8 *)&pdata->flags, 1))
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index b78101afc93d..2b539335206a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -367,7 +367,6 @@ uint oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-
uint oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
return RNDIS_STATUS_SUCCESS;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 6cdc6f1a6bc6..34c9a52b4c42 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -22,7 +22,6 @@
#include "usb_osintf.h"
#include "usb_ops.h"
-
static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
{
u8 i;
@@ -76,7 +75,7 @@ static u8 do_join(struct _adapter *padapter)
* acquired by caller...
*/
struct wlan_bssid_ex *pdev_network =
- &(padapter->registrypriv.dev_network);
+ &padapter->registrypriv.dev_network;
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
pibss = padapter->registrypriv.dev_network.MacAddress;
memcpy(&pdev_network->Ssid,
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 92b7c9c07df6..63e12b157001 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -431,8 +431,7 @@ static int is_desired_network(struct _adapter *adapter,
bselected = false;
if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.InfrastructureMode !=
- adapter->mlmepriv.cur_network.network.
- InfrastructureMode)
+ adapter->mlmepriv.cur_network.network.InfrastructureMode)
bselected = false;
}
return bselected;
@@ -539,8 +538,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
struct wlan_bssid_ex *pdev_network =
&(adapter->registrypriv.dev_network);
u8 *pibss =
- adapter->registrypriv.
- dev_network.MacAddress;
+ adapter->registrypriv.dev_network.MacAddress;
pmlmepriv->fw_state ^= _FW_UNDER_SURVEY;
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
@@ -688,11 +686,9 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
pnetwork->network.Configuration.DSConfig =
le32_to_cpu(pnetwork->network.Configuration.DSConfig);
pnetwork->network.Configuration.FHConfig.DwellTime =
- le32_to_cpu(pnetwork->network.Configuration.FHConfig.
- DwellTime);
+ le32_to_cpu(pnetwork->network.Configuration.FHConfig.DwellTime);
pnetwork->network.Configuration.FHConfig.HopPattern =
- le32_to_cpu(pnetwork->network.Configuration.
- FHConfig.HopPattern);
+ le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopPattern);
pnetwork->network.Configuration.FHConfig.HopSet =
le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopSet);
pnetwork->network.Configuration.FHConfig.Length =
@@ -717,36 +713,29 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
if (the_same_macaddr) {
ptarget_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
+ r8712_find_network(&pmlmepriv->scanned_queue,
cur_network->network.MacAddress);
} else {
pcur_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
+ r8712_find_network(&pmlmepriv->scanned_queue,
cur_network->network.MacAddress);
if (pcur_wlan)
pcur_wlan->fixed = false;
pcur_sta = r8712_get_stainfo(pstapriv,
cur_network->network.MacAddress);
- spin_lock_irqsave(&pstapriv->
- sta_hash_lock, irqL2);
+ spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL2);
r8712_free_stainfo(adapter, pcur_sta);
- spin_unlock_irqrestore(&(pstapriv->
- sta_hash_lock), irqL2);
+ spin_unlock_irqrestore(&(pstapriv->sta_hash_lock), irqL2);
ptarget_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
- pnetwork->network.
- MacAddress);
+ r8712_find_network(&pmlmepriv->scanned_queue,
+ pnetwork->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
}
} else {
- ptarget_wlan = r8712_find_network(&pmlmepriv->
- scanned_queue,
+ ptarget_wlan = r8712_find_network(&pmlmepriv->scanned_queue,
pnetwork->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
@@ -779,39 +768,25 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ptarget_sta->aid = pnetwork->join_res;
ptarget_sta->qos_option = 1;
ptarget_sta->mac_id = 5;
- if (adapter->securitypriv.
- AuthAlgrthm == 2) {
- adapter->securitypriv.
- binstallGrpkey =
- false;
- adapter->securitypriv.
- busetkipkey =
- false;
- adapter->securitypriv.
- bgrpkey_handshake =
- false;
- ptarget_sta->ieee8021x_blocked
- = true;
- ptarget_sta->XPrivacy =
- adapter->securitypriv.
- PrivacyAlgrthm;
- memset((u8 *)&ptarget_sta->
- x_UncstKey,
+ if (adapter->securitypriv.AuthAlgrthm == 2) {
+ adapter->securitypriv.binstallGrpkey = false;
+ adapter->securitypriv.busetkipkey = false;
+ adapter->securitypriv.bgrpkey_handshake = false;
+ ptarget_sta->ieee8021x_blocked = true;
+ ptarget_sta->XPrivacy = adapter->
+ securitypriv.PrivacyAlgrthm;
+ memset((u8 *)&ptarget_sta->x_UncstKey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- tkiprxmickey,
+ memset((u8 *)&ptarget_sta->tkiprxmickey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- tkiptxmickey,
+ memset((u8 *)&ptarget_sta->tkiptxmickey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- txpn, 0,
+ memset((u8 *)&ptarget_sta->txpn, 0,
sizeof(union pn48));
- memset((u8 *)&ptarget_sta->
- rxpn, 0,
+ memset((u8 *)&ptarget_sta->rxpn, 0,
sizeof(union pn48));
}
} else {
@@ -942,8 +917,7 @@ void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf)
pdev_network = &(adapter->registrypriv.dev_network);
pibss = adapter->registrypriv.dev_network.MacAddress;
memcpy(pdev_network, &tgt_network->network,
- r8712_get_wlan_bssid_ex_sz(&tgt_network->
- network));
+ r8712_get_wlan_bssid_ex_sz(&tgt_network->network));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
sizeof(struct ndis_802_11_ssid));
@@ -1092,8 +1066,7 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
src_ssid = pmlmepriv->assoc_bssid;
if (!memcmp(dst_ssid, src_ssid, ETH_ALEN)) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (is_same_network(&pmlmepriv->
- cur_network.network,
+ if (is_same_network(&pmlmepriv->cur_network.network,
&pnetwork->network)) {
_clr_fwstate_(pmlmepriv,
_FW_UNDER_LINKING);
@@ -1284,26 +1257,13 @@ int r8712_restruct_wmm_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie,
*/
static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if (psecuritypriv->PMKIDList[i].bUsed &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid,
- bssid, ETH_ALEN)))
- break;
- i++;
-
- } while (i < NUM_PMKID_CACHE);
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
- if (i == NUM_PMKID_CACHE) {
- i = -1; /* Could not find. */
- } else {
- ; /* There is one Pre-Authentication Key for the
- * specific BSSID.
- */
- }
- return i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+ return i;
+ return -1;
}
sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
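SecIsInPMKIDList() is rewritten from a do/while loop with a post-loop index check into a for loop that returns the matching slot directly and -1 when nothing matches; the dead "else" comment goes away with it. A standalone version of the same shape, with placeholder types:

#include <string.h>

#define NUM_PMKID_CACHE 16
#define ETH_ALEN 6

struct pmkid_entry {
        unsigned char bssid[ETH_ALEN];
        int used;
};

/* Return the cache slot holding @bssid, or -1 if it is not cached. */
static int pmkid_find(const struct pmkid_entry *cache,
                      const unsigned char *bssid)
{
        int i;

        for (i = 0; i < NUM_PMKID_CACHE; i++)
                if (cache[i].used &&
                    !memcmp(cache[i].bssid, bssid, ETH_ALEN))
                        return i;
        return -1;
}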
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 98204493a04c..aa4d5ce471f2 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -148,7 +148,6 @@ extern struct oid_obj_priv oid_rtl_seg_87_12_00[32];
#endif /* _RTL871X_MP_IOCTL_C_ */
-
enum MP_MODE {
MP_START_MODE,
MP_STOP_MODE,
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index ca5072e11e22..a08c5d2f59e3 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -26,7 +26,6 @@
#ifndef __RTL871X_MP_PHY_REGDEF_H
#define __RTL871X_MP_PHY_REGDEF_H
-
/*--------------------------Define Parameters-------------------------------*/
/*============================================================
@@ -1008,7 +1007,6 @@
#define ANTENNA_C 0x4
#define ANTENNA_D 0x8
-
/* accept all physical address */
#define RCR_AAP BIT(0)
#define RCR_APM BIT(1) /* accept physical match */
@@ -1032,6 +1030,5 @@
/*--------------------------Define Parameters-------------------------------*/
-
#endif /*__INC_HAL8192SPHYREG_H */
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 66cc50f24e29..de9a568eaffa 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -455,7 +455,6 @@ static sint validate_recv_mgnt_frame(struct _adapter *adapter,
return _FAIL;
}
-
static sint validate_recv_data_frame(struct _adapter *adapter,
union recv_frame *precv_frame)
{
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index e0a1c30a8fe6..e46a5dbc7b65 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -381,7 +381,6 @@ void seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code,
#define P1K_SIZE 10 /* 80-bit Phase1 key */
#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
-
/* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */
static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
{
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 9b7e5ffa380d..6286c622475e 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -21,7 +21,6 @@
#define NUM_STA 32
#define NUM_ACL 64
-
/* if mode ==0, then the sta is allowed once the addr is hit.
* if mode ==1, then the sta is rejected once the addr is non-hit.
*/
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index ee4c61f85a07..37364d3101e2 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -265,6 +265,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
{
+ r8712_free_io_queue(padapter);
}
void rtl871x_intf_stop(struct _adapter *padapter)
@@ -302,9 +303,6 @@ void r871x_dev_unload(struct _adapter *padapter)
rtl8712_hal_deinit(padapter);
}
- /*s6.*/
- if (padapter->dvobj_deinit)
- padapter->dvobj_deinit(padapter);
padapter->bup = false;
}
}
@@ -538,13 +536,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
} else {
AutoloadFail = false;
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
+ if ((!AutoloadFail) ||
+ ((mac[0] == 0xff) && (mac[1] == 0xff) &&
(mac[2] == 0xff) && (mac[3] == 0xff) &&
(mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
- (mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (!AutoloadFail)) {
+ (mac[4] == 0x00) && (mac[5] == 0x00))) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
@@ -568,7 +566,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 6. Load the firmware asynchronously */
if (rtl871x_load_fw(padapter))
goto deinit_drv_sw;
- spin_lock_init(&padapter->lock_rx_ff0_filter);
+ init_completion(&padapter->rx_filter_ready);
mutex_init(&padapter->mutex_start);
return 0;
@@ -607,6 +605,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* Stop driver mlme relation timer */
r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
+ if (padapter->dvobj_deinit)
+ padapter->dvobj_deinit(padapter);
r8712_free_drv_sw(padapter);
free_netdev(pnetdev);
diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
index e64845e6adf3..af9966d03979 100644
--- a/drivers/staging/rtl8712/usb_ops.c
+++ b/drivers/staging/rtl8712/usb_ops.c
@@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 1;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u8)(le32_to_cpu(data) & 0x0ff);
}
@@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 2;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u16)(le32_to_cpu(data) & 0xffff);
}
@@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 4;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return le32_to_cpu(data);
}
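The usb_read8/16/32 hunks above stop converting a buffer that may never have been written: `data` is now zero-initialized and the return value of r8712_usbctrl_vendorreq() is checked, so a failed control transfer reads back as 0 instead of stack garbage (0 is still ambiguous, but at least deterministic). A sketch of the shape; vendor_read() is a stand-in, not a real kernel function:

#include <linux/types.h>
#include <asm/byteorder.h>

int vendor_read(void *priv, u16 addr, __le32 *data, u16 len);  /* stand-in */

static u32 reg_read32(void *priv, u16 addr)
{
        __le32 data = 0;                /* defined value even on failure */
        int status;

        status = vendor_read(priv, addr, &data, sizeof(data));
        if (status < 0)
                return 0;               /* error: report all-zeros */

        return le32_to_cpu(data);
}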
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index f984a5ab2c6f..b2181e1e2d38 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -495,14 +495,21 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
}
status = usb_control_msg(udev, pipe, request, reqtype, value, index,
pIo_buf, len, 500);
- if (status > 0) { /* Success this control transfer. */
- if (requesttype == 0x01) {
- /* For Control read transfer, we have to copy the read
- * data from pIo_buf to pdata.
- */
- memcpy(pdata, pIo_buf, status);
- }
+ if (status < 0)
+ goto free;
+ if (status != len) {
+ status = -EREMOTEIO;
+ goto free;
+ }
+ /* Success this control transfer. */
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy the read
+ * data from pIo_buf to pdata.
+ */
+ memcpy(pdata, pIo_buf, status);
}
+
+free:
kfree(palloc_buf);
return status;
}
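usb_control_msg() returns the number of bytes transferred on success or a negative errno, and r8712_usbctrl_vendorreq() now checks both cases: a negative status is propagated, a short transfer becomes -EREMOTEIO, and the read data is copied back only after both checks pass, with a single free: label for cleanup. A condensed, read-only sketch of that flow (the function name and the reduced argument handling are illustrative):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int vendorreq_read(struct usb_device *udev, unsigned int pipe,
                          u8 request, u8 reqtype, u16 value, u16 index,
                          void *dst, u16 len)
{
        void *buf;
        int status;

        buf = kmalloc(len, GFP_KERNEL); /* DMA-safe bounce buffer */
        if (!buf)
                return -ENOMEM;

        status = usb_control_msg(udev, pipe, request, reqtype, value,
                                 index, buf, len, 500);
        if (status < 0)
                goto free;              /* transfer failed outright */
        if (status != len) {
                status = -EREMOTEIO;    /* short transfer: treat as error */
                goto free;
        }
        memcpy(dst, buf, len);          /* control read succeeded */
free:
        kfree(buf);
        return status;
}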
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index b8acb9c7395d..498e6dec7e67 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -186,7 +186,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _CAPABILITY_ 2
#define _TIMESTAMP_ 8
-
/*-----------------------------------------------------------------------------
* Below is the definition for WMM
*------------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 90d34cf9d2ff..4a93839bf947 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -95,18 +95,12 @@ void r8712_SetFilter(struct work_struct *work)
struct _adapter *adapter = container_of(work, struct _adapter,
wk_filter_rx_ff0);
u8 oldvalue = 0x00, newvalue = 0x00;
- unsigned long irqL;
oldvalue = r8712_read8(adapter, 0x117);
newvalue = oldvalue & 0xfe;
r8712_write8(adapter, 0x117, newvalue);
- spin_lock_irqsave(&adapter->lock_rx_ff0_filter, irqL);
- adapter->blnEnableRxFF0Filter = 1;
- spin_unlock_irqrestore(&adapter->lock_rx_ff0_filter, irqL);
- do {
- msleep(100);
- } while (adapter->blnEnableRxFF0Filter == 1);
+ wait_for_completion(&adapter->rx_filter_ready);
r8712_write8(adapter, 0x117, oldvalue);
}
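Spread over drv_types.h, rtl871x_cmd.c, usb_intf.c and this file, the rtl8712 changes replace the blnEnableRxFF0Filter flag, its spinlock and the msleep(100) polling loop with a struct completion: the filter worker now blocks in wait_for_completion() and r8712_sitesurvey_cmd() releases it with complete(), so the worker wakes immediately instead of polling every 100 ms. A minimal sketch of the handshake, with placeholder names:

#include <linux/completion.h>

struct filter_ctx {
        struct completion rx_filter_ready;
};

static void filter_ctx_init(struct filter_ctx *ctx)
{
        init_completion(&ctx->rx_filter_ready);
}

/* Worker side: previously polled a flag in a msleep() loop. */
static void filter_work(struct filter_ctx *ctx)
{
        /* ... mask the RX filter register ... */
        wait_for_completion(&ctx->rx_filter_ready);
        /* ... restore the register ... */
}

/* Command side: previously cleared the flag; now wakes the worker. */
static void filter_release(struct filter_ctx *ctx)
{
        complete(&ctx->rx_filter_ready);
}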
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 5478188be991..d30d6e6bcd07 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -520,12 +520,12 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
/* B0 Config LDPC Coding Capability */
if (TEST_FLAG(phtpriv_ap->ldpc_cap, LDPC_HT_ENABLE_TX) &&
- GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
+ GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));
/* B7 B8 B9 Config STBC setting */
if (TEST_FLAG(phtpriv_ap->stbc_cap, STBC_HT_ENABLE_TX) &&
- GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
+ GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
} else {
phtpriv_sta->ampdu_enable = false;
@@ -1065,10 +1065,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
);
if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
- (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+ (0x07 << 2));
} else {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+ 0x00);
}
rtw_hal_get_def_var(
@@ -1116,7 +1118,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pmlmepriv->htpriv.ht_option = false;
if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
- (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
+ (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
/* todo: */
/* ht_cap = false; */
}
@@ -1725,7 +1727,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_preamble--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0)) {
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -1763,7 +1765,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_slot_time++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -1775,7 +1777,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_slot_time--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -2024,7 +2026,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
start_bss_network(padapter);
if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
- (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
/* restore group key, WEP keys is restored in ips_leave() */
rtw_set_key(
padapter,
@@ -2062,7 +2064,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
/* pairwise key */
/* per sta pairwise key and settings */
if ((psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) ||
- (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
+ (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
rtw_setstakey_cmd(padapter, psta, true, false);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 14d37b369273..b4170f64d118 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -1238,7 +1238,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
/*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
/* if we raise bBusyTraffic in last watchdog, using lower threshold. */
if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- BusyThreshold = BusyThresholdLow;
+ BusyThreshold = BusyThresholdLow;
if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
@@ -1885,11 +1885,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
}
@@ -1916,11 +1913,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, 1);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 3d3c77273026..06e727ce9cc2 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -100,7 +100,7 @@ u8 PwrState)
u16
Efuse_GetCurrentSize(
struct adapter *padapter,
- u8 efuseType,
+ u8 efuseType,
bool bPseudoTest)
{
return padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType,
@@ -124,29 +124,29 @@ Efuse_CalculateWordCnts(u8 word_en)
}
/* */
-/* Description: */
-/* 1. Execute E-Fuse read byte operation according as map offset and */
-/* save to E-Fuse table. */
-/* 2. Referred from SD1 Richard. */
+/* Description: */
+/* 1. Execute E-Fuse read byte operation according as map offset and */
+/* save to E-Fuse table. */
+/* 2. Referred from SD1 Richard. */
/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
/* */
-/* Created by Roger, 2008.10.21. */
+/* Created by Roger, 2008.10.21. */
/* */
-/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
-/* 2. Add efuse utilization collect. */
-/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
-/* write addr must be after sec5. */
+/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
/* */
void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
- u16 _offset,
- u16 _size_byte,
+ u16 _offset,
+ u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
);
@@ -154,8 +154,8 @@ void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
- u16 _offset,
- u16 _size_byte,
+ u16 _offset,
+ u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
)
@@ -168,7 +168,7 @@ EFUSE_GetEfuseDefinition(
struct adapter *padapter,
u8 efuseType,
u8 type,
- void *pOut,
+ void *pOut,
bool bPseudoTest
)
{
@@ -194,7 +194,7 @@ EFUSE_GetEfuseDefinition(
u8
EFUSE_Read1Byte(
struct adapter *Adapter,
-u16 Address)
+u16 Address)
{
u8 Bytetemp = {0x00};
u8 temp = {0x00};
@@ -235,8 +235,8 @@ u16 Address)
u8
efuse_OneByteRead(
struct adapter *padapter,
-u16 addr,
-u8 *data,
+u16 addr,
+u8 *data,
bool bPseudoTest)
{
u32 tmpidx = 0;
@@ -324,8 +324,8 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
int
Efuse_PgPacketRead(struct adapter *padapter,
- u8 offset,
- u8 *data,
+ u8 offset,
+ u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data,
@@ -334,9 +334,9 @@ Efuse_PgPacketRead(struct adapter *padapter,
int
Efuse_PgPacketWrite(struct adapter *padapter,
- u8 offset,
- u8 word_en,
- u8 *data,
+ u8 offset,
+ u8 word_en,
+ u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en,
@@ -386,7 +386,7 @@ efuse_WordEnableDataRead(u8 word_en,
u8
Efuse_WordEnableDataWrite(struct adapter *padapter,
- u16 efuse_addr,
+ u16 efuse_addr,
u8 word_en,
u8 *data,
bool bPseudoTest)
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index b449be537376..68e41d99679d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -94,16 +94,14 @@ bool rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
- if (channel > 14) {
+ if (channel > 14)
return WIRELESS_INVALID;
- } else { /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
- }
+ /* could be pure B, pure G, or B/G */
+ if (rtw_is_cckratesonly_included(rate))
+ return WIRELESS_11B;
+ if (rtw_is_cckrates_included(rate))
+ return WIRELESS_11BG;
+ return WIRELESS_11G;
}
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -151,11 +149,10 @@ u8 *rtw_get_ie(u8 *pbuf, signed int index, signed int *len, signed int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -199,9 +196,8 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
*ielen = in_ie[cnt+1]+2;
break;
- } else {
- cnt += in_ie[cnt+1]+2; /* goto next */
}
+ cnt += in_ie[cnt+1]+2; /* goto next */
}
return target_ie;
@@ -339,9 +335,8 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
ie = rtw_set_ie(ie, WLAN_EID_IBSS_PARAMS, 2, (u8 *)&(pdev_network->configuration.atim_window), &sz);
- if (rateLen > 8) {
+ if (rateLen > 8)
ie = rtw_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8), (pdev_network->supported_rates + 8), &sz);
- }
/* HT Cap. */
if ((pregistrypriv->wireless_mode & WIRELESS_11_24N) &&
@@ -370,9 +365,8 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
if (pbuf) {
/* check if oui matches... */
- if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) {
+ if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
goto check_next_ie;
- }
/* check version... */
memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
@@ -497,9 +491,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (!memcmp(pos, SUITE_1X, 4)) {
+ if (!memcmp(pos, SUITE_1X, 4))
*is_8021x = 1;
- }
}
}
@@ -518,9 +511,8 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
return _FAIL;
}
- if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
+ if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
return _FAIL;
- }
pos = rsn_ie;
pos += 4;
@@ -697,9 +689,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt+1]+2;
break;
- } else {
- cnt += in_ie[cnt+1]+2; /* goto next */
}
+ cnt += in_ie[cnt+1]+2; /* goto next */
}
return wpsie_ptr;
@@ -748,9 +739,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index ed2d3b7d44d9..f2242cf2dfb4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -751,7 +751,9 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ spin_unlock_bh(&pmlmepriv->lock);
del_timer_sync(&pmlmepriv->scan_to_timer);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
@@ -792,7 +794,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (_SUCCESS == s_ret) {
+ if (s_ret == _SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
} else if (s_ret == 2) {/* there is no need to wait for join */
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
@@ -1238,8 +1240,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ spin_unlock_bh(&pmlmepriv->lock);
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
+ spin_lock_bh(&pmlmepriv->lock);
} else {
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
}
@@ -1545,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1554,7 +1558,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
int do_join_r;
do_join_r = rtw_do_join(adapter);
- if (_SUCCESS != do_join_r) {
+ if (do_join_r != _SUCCESS) {
continue;
}
break;
@@ -1573,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
}
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
}
/*
@@ -1586,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
rtw_indicate_scan_done(adapter, true);
}
@@ -2036,28 +2040,14 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
- break;
- } else {
- i++;
- /* continue; */
- }
-
- } while (i < NUM_PMKID_CACHE);
-
- if (i == NUM_PMKID_CACHE) {
- i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
-
- return i;
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if ((p->PMKIDList[i].bUsed) &&
+ (!memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN)))
+ return i;
+ return -1;
}
/* */
@@ -2558,7 +2548,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
- if (0 == issued) {
+ if (issued == 0) {
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
}
@@ -2608,30 +2598,20 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
- int do_join_r;
- if (0 < rtw_to_roam(padapter)) {
+ if (rtw_to_roam(padapter) > 0) {
memcpy(&pmlmepriv->assoc_ssid, &cur_network->network.ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
- while (1) {
- do_join_r = rtw_do_join(padapter);
- if (_SUCCESS == do_join_r) {
+ while (rtw_do_join(padapter) != _SUCCESS) {
+ rtw_dec_to_roam(padapter);
+ if (rtw_to_roam(padapter) <= 0) {
+ rtw_indicate_disconnect(padapter);
break;
- } else {
- rtw_dec_to_roam(padapter);
-
- if (rtw_to_roam(padapter) > 0) {
- continue;
- } else {
- rtw_indicate_disconnect(padapter);
- break;
- }
}
}
}
-
}
signed int rtw_linked_check(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 49a3f45cb771..1bdbd0971f73 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -271,11 +271,9 @@ static int has_channel(struct rt_channel_info *channel_set,
{
int i;
- for (i = 0; i < chanset_size; i++) {
- if (channel_set[i].ChannelNum == chan) {
+ for (i = 0; i < chanset_size; i++)
+ if (channel_set[i].ChannelNum == chan)
return 1;
- }
- }
return 0;
}
@@ -311,11 +309,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
if (!has_channel(channel_set, chanset_size, ch))
continue;
- if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
continue;
if ((0 < (padapter->registrypriv.bw_mode & 0xf0)) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (!reg) {
@@ -345,7 +343,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (is_supported_24g(padapter->registrypriv.wireless_mode)) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -355,14 +353,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_NULL == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_NULL)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) { /* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) { /* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -649,9 +647,8 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
/* update WMM, ERP in the beacon */
/* todo: the timer is used instead of the number of the beacon received */
- if ((sta_rx_pkts(psta) & 0xf) == 0) {
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
update_beacon_info(padapter, pframe, len, psta);
- }
} else {
/* allocate a new CAM entry for IBSS station */
cam_idx = allocate_fw_sta_entry(padapter);
@@ -911,16 +908,14 @@ unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_fram
set_link_timer(pmlmeext, REAUTH_TO);
return _SUCCESS;
- } else {
- /* open system */
- go2asoc = 1;
}
+ /* open system */
+ go2asoc = 1;
} else if (seq == 4) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
go2asoc = 1;
- } else {
+ else
goto authclnt_fail;
- }
} else {
/* this is also illegal */
goto authclnt_fail;
@@ -1331,7 +1326,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (WLAN_STATUS_SUCCESS == status)) {
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == WLAN_STATUS_SUCCESS)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -1455,11 +1450,10 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
UpdateBrateTbl(padapter, pmlmeinfo->network.supported_rates);
report_assoc_result:
- if (res > 0) {
+ if (res > 0)
rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
- } else {
+ else
rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
- }
report_join_res(padapter, res);
@@ -1473,6 +1467,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 *pframe = precv_frame->u.hdr.rx_data;
+ int ignore_received_deauth = 0;
/* check A3 */
if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
@@ -1508,36 +1503,33 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
- } else {
- int ignore_received_deauth = 0;
-
- /* Commented by Albert 20130604 */
- /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
- /* we will send the deauth first. */
- /* However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receieving our deauth. */
- /* Added the following code to avoid this case. */
- if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
- (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
- if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
- ignore_received_deauth = 1;
- } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
- /* TODO: 802.11r */
- ignore_received_deauth = 1;
- }
- }
-
- netdev_dbg(padapter->pnetdev,
- "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
- reason, GetAddr3Ptr(pframe),
- ignore_received_deauth);
+ }
- if (0 == ignore_received_deauth) {
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ /* Commented by Albert 20130604 */
+ /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
+ /* we will send the deauth first. */
+ /* However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receieving our deauth. */
+ /* Added the following code to avoid this case. */
+ if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
+ (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
+ if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
+ ignore_received_deauth = 1;
+ } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
+ /* TODO: 802.11r */
+ ignore_received_deauth = 1;
}
}
+
+ netdev_dbg(padapter->pnetdev,
+ "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+ reason, GetAddr3Ptr(pframe),
+ ignore_received_deauth);
+
+ if (ignore_received_deauth == 0)
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
-
}
unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
@@ -1581,13 +1573,13 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
}
return _SUCCESS;
- } else {
- netdev_dbg(padapter->pnetdev,
- "sta recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
-
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
+ netdev_dbg(padapter->pnetdev,
+ "sta recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
@@ -1674,11 +1666,10 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
/* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
- if (pmlmeinfo->accept_addba_req) {
+ if (pmlmeinfo->accept_addba_req)
issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 0);
- } else {
+ else
issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
- }
break;
@@ -1774,9 +1765,8 @@ static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
u8 *pframe = precv_frame->u.hdr.rx_data;
u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
- if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
+ if (!memcmp(frame_body + 2, P2P_OUI, 4))
ret = on_action_public_p2p(precv_frame);
- }
return ret;
}
@@ -2187,9 +2177,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
- if (wps_ie && wps_ielen > 0) {
+ if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
- }
if (sr != 0)
set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
else
@@ -2245,9 +2234,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* EXTERNDED SUPPORTED RATE */
- if (rate_len > 8) {
+ if (rate_len > 8)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
- }
/* todo:HT for adhoc */
@@ -2400,7 +2388,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pframe += ssid_ielen_diff;
pattrib->pktlen += ssid_ielen_diff;
}
- kfree (buf);
+ kfree(buf);
}
} else {
/* timestamp will be inserted by hardware */
@@ -2447,9 +2435,8 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
/* EXTERNDED SUPPORTED RATE */
- if (rate_len > 8) {
+ if (rate_len > 8)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
- }
/* todo:HT for adhoc */
@@ -2674,9 +2661,8 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
/* setting auth algo number */
val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
- if (val16) {
+ if (val16)
use_shared_key = 1;
- }
le_tmp = cpu_to_le16(val16);
/* setting IV for auth seq #3 */
@@ -2831,16 +2817,14 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
break;
}
- if (!pbuf || ie_len == 0) {
+ if (!pbuf || ie_len == 0)
break;
- }
}
}
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, 6, REALTEK_96B_IE, &(pattrib->pktlen));
- }
/* add WPS IE ie for wps 2.0 */
if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -3301,9 +3285,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
+ if (!pmgntframe)
goto exit;
- }
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -3552,13 +3535,13 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
rtw_hal_get_def_var(padapter,
HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
- if (IEEE80211_HT_MAX_AMPDU_64K == max_rx_ampdu_factor)
+ if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_32K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_16K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_8K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
else
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
@@ -3627,9 +3610,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
action = ACT_PUBLIC_BSSCOEXIST;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
+ if (!pmgntframe)
return;
- }
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -3802,10 +3784,8 @@ unsigned int send_beacon(struct adapter *padapter)
} while (false == bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return _FAIL;
- }
-
if (!bxmitok)
return _FAIL;
@@ -4388,9 +4368,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
}
/* skip AP 2.4G channel plan */
- while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
j++;
- }
}
pmlmeext->update_channel_plan_by_ap_done = 1;
@@ -4402,9 +4381,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
i = 0;
while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
if (chplan_new[i].ChannelNum == channel) {
- if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE)
chplan_new[i].ScanType = SCAN_ACTIVE;
- }
break;
}
i++;
@@ -4629,9 +4607,8 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!pcmd_obj) {
+ if (!pcmd_obj)
return;
- }
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
@@ -5086,7 +5063,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
@@ -5124,9 +5101,8 @@ void survey_timer_hdl(struct timer_list *t)
/* issue rtw_sitesurvey_cmd */
if (pmlmeext->sitesurvey_res.state > SCAN_START) {
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
pmlmeext->sitesurvey_res.channel_idx++;
- }
if (pmlmeext->scan_abort) {
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
@@ -5135,24 +5111,18 @@ void survey_timer_hdl(struct timer_list *t)
}
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!ph2c) {
- goto exit_survey_timer_hdl;
- }
+ if (!ph2c)
+ return;
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (!psurveyPara) {
kfree(ph2c);
- goto exit_survey_timer_hdl;
+ return;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
rtw_enqueue_cmd(pcmdpriv, ph2c);
}
-
-
-exit_survey_timer_hdl:
-
- return;
}
void link_timer_hdl(struct timer_list *t)
@@ -5173,17 +5143,9 @@ void link_timer_hdl(struct timer_list *t)
} else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
/* re-auth timer */
if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
- /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
- /* */
- pmlmeinfo->state = 0;
- report_join_res(padapter, -1);
- return;
- /* */
- /* else */
- /* */
- /* pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
- /* pmlmeinfo->reauth_count = 0; */
- /* */
+ pmlmeinfo->state = 0;
+ report_join_res(padapter, -1);
+ return;
}
pmlmeinfo->auth_seq = 1;
@@ -5348,9 +5310,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* check already connecting to AP or not */
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- if (pmlmeinfo->state & WIFI_FW_STATION_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
- }
pmlmeinfo->state = WIFI_FW_NULL_STATE;
/* clear CAM */
@@ -5485,9 +5446,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
u8 val8;
- if (is_client_associated_to_ap(padapter)) {
+ if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
- }
if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
/* Stop BCN */
@@ -6073,7 +6033,7 @@ u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf)
struct RunInThread_param *p;
- if (NULL == pbuf)
+ if (pbuf == NULL)
return H2C_PARAMETERS_ERROR;
p = (struct RunInThread_param *)pbuf;
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
index 96eb8ca38003..4f120c894998 100644
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -8,47 +8,27 @@
#include <drv_types.h>
#include <linux/kernel.h>
-
-struct ch_freq {
- u32 channel;
- u32 frequency;
-};
-
-static struct ch_freq ch_freq_map[] = {
- {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
- {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
- {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
- /* UNII */
- {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
- {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
- {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
- {171, 5855}, {173, 5865},
- /* HiperLAN2 */
- {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
- {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
- {140, 5700},
- /* Japan MMAC */
- {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
- /* Japan */
- {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
- {208, 5040},/* Japan, means J08 */
- {212, 5060},/* Japan, means J12 */
- {216, 5080},/* Japan, means J16 */
+static const u32 ch_freq_map[] = {
+ 2412,
+ 2417,
+ 2422,
+ 2427,
+ 2432,
+ 2437,
+ 2442,
+ 2447,
+ 2452,
+ 2457,
+ 2462,
+ 2467,
+ 2472,
+ 2484
};
u32 rtw_ch2freq(u32 channel)
{
- u8 i;
- u32 freq = 0;
-
- for (i = 0; i < ARRAY_SIZE(ch_freq_map); i++) {
- if (channel == ch_freq_map[i].channel) {
- freq = ch_freq_map[i].frequency;
- break;
- }
- }
- if (i == ARRAY_SIZE(ch_freq_map))
- freq = 2412;
+ if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
+ return 2412;
- return freq;
+ return ch_freq_map[channel - 1];
}
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index af50674b2a65..9091f2f75fe1 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -68,16 +68,6 @@ enum btc_chip_interface {
BTC_INTF_MAX
};
-enum {
- BTC_CHIP_UNDEF = 0,
- BTC_CHIP_CSR_BC4 = 1,
- BTC_CHIP_CSR_BC8 = 2,
- BTC_CHIP_RTL8723A = 3,
- BTC_CHIP_RTL8821 = 4,
- BTC_CHIP_RTL8723B = 5,
- BTC_CHIP_MAX
-};
-
/* following is for wifi link status */
#define WIFI_STA_CONNECTED BIT0
#define WIFI_AP_CONNECTED BIT1
@@ -87,7 +77,6 @@ enum {
struct btc_board_info {
/* The following is some board information */
- u8 btChipType;
u8 pgAntNum; /* pg ant number */
u8 btdmAntNum; /* ant number for btdm */
u8 btdmAntPos; /* Bryant Add to indicate Antenna Position for (pgAntNum = 2) && (btdmAntNum = 1) (DPDT+1Ant case) */
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index f4b3e8b28712..9acd49323c7c 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1113,11 +1113,6 @@ void EXhalbtcoutsrc_Periodical(struct btc_coexist *pBtCoexist)
/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
}
-void EXhalbtcoutsrc_SetChipType(u8 chipType)
-{
- GLBtCoexist.boardInfo.btChipType = BTC_CHIP_RTL8723B;
-}
-
void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum)
{
if (BT_COEX_ANT_TYPE_PG == type) {
@@ -1188,9 +1183,6 @@ void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType)
pHalData = GET_HAL_DATA(padapter);
- pHalData->bt_coexist.btChipType = chipType;
-
- EXhalbtcoutsrc_SetChipType(chipType);
}
void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum)
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index a545832a468e..107f427ee4aa 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -811,17 +811,14 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
SIZE_PTR alignment = 0;
recvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
-
- if (recvbuf->pskb) {
- recvbuf->pskb->dev = adapter->pnetdev;
-
- tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
- }
-
if (!recvbuf->pskb)
return NULL;
+
+ recvbuf->pskb->dev = adapter->pnetdev;
+
+ tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+ skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
}
/* 3 3. read data from rxfifo */
diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h
index 8f654a49fb9d..d0ce21ccc1cc 100644
--- a/drivers/staging/rtl8723bs/include/HalVerDef.h
+++ b/drivers/staging/rtl8723bs/include/HalVerDef.h
@@ -9,16 +9,7 @@
/* hal_ic_type_e */
enum hal_ic_type_e { /* tag_HAL_IC_Type_Definition */
- CHIP_8192S = 0,
- CHIP_8188C = 1,
- CHIP_8192C = 2,
- CHIP_8192D = 3,
- CHIP_8723A = 4,
- CHIP_8188E = 5,
- CHIP_8812 = 6,
- CHIP_8821 = 7,
CHIP_8723B = 8,
- CHIP_8192E = 9,
};
/* hal_chip_type_e */
@@ -58,7 +49,6 @@ struct hal_version { /* tag_HAL_VERSION */
u8 ROMVer;
};
-/* VERSION_8192C VersionID; */
/* hal_version VersionID; */
/* Get element */
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 0ce08c2a0755..0bbbdebdf157 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -42,7 +42,6 @@
#include <rtw_mlme.h>
#include <mlme_osdep.h>
#include <rtw_io.h>
-#include <rtw_ioctl.h>
#include <rtw_ioctl_set.h>
#include <osdep_intf.h>
#include <rtw_eeprom.h>
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
index 8213dcf48b34..d8d03752dc2e 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -72,13 +72,9 @@
#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS Multi-Function control source. */
#define REG_GSSR 0x006c
#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
-#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
#define REG_MCUFWDL 0x0080
-#define REG_WOL_EVENT 0x0081 /* RTL8188E */
#define REG_MCUTSTCFG 0x0084
#define REG_FDHM0 0x0088
-#define REG_HOST_SUSP_CNT 0x00BC /* RTL8192C Host suspend counter on FPGA platform */
-#define REG_SYSTEM_ON_CTRL 0x00CC /* For 8723AE Reset after S3 */
#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection for RTL8723 */
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
@@ -117,7 +113,6 @@
#define REG_FWIMR 0x0130
#define REG_FWISR 0x0134
#define REG_FTIMR 0x0138
-#define REG_FTISR 0x013C /* RTL8192C */
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
#define REG_PKTBUF_DBG_DATA_L 0x0144
@@ -132,11 +127,9 @@
#define REG_MBIST_START 0x0174
#define REG_MBIST_DONE 0x0178
#define REG_MBIST_FAIL 0x017C
-#define REG_32K_CTRL 0x0194 /* RTL8188E */
#define REG_C2HEVT_MSG_NORMAL 0x01A0
#define REG_C2HEVT_CLEAR 0x01AF
#define REG_MCUTST_1 0x01c0
-#define REG_MCUTST_WOWLAN 0x01C7 /* Defined after 8188E series. */
#define REG_FMETHR 0x01C8
#define REG_HMETFR 0x01CC
#define REG_HMEBOX_0 0x01D0
@@ -526,44 +519,6 @@
#define MAX_MSS_DENSITY_1T 0x0A
/* */
-/* 8192C Cmd9346CR bits (Offset 0xA, 16bit) */
-/* */
-#define CmdEEPROM_En BIT5 /* EEPROM enable when set 1 */
-#define CmdEERPOMSEL BIT4 /* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
-#define Cmd9346CR_9356SEL BIT4
-
-/* */
-/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
-/* */
-#define GPIOSEL_GPIO 0
-#define GPIOSEL_ENBT BIT5
-
-/* */
-/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
-/* */
-#define GPIO_IN REG_GPIO_PIN_CTRL /* GPIO pins input value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) /* GPIO pins output value */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
-
-/* */
-/* 8811A GPIO PIN Control Register (offset 0x60, 4 byte) */
-/* */
-#define GPIO_IN_8811A REG_GPIO_PIN_CTRL_2 /* GPIO pins input value */
-#define GPIO_OUT_8811A (REG_GPIO_PIN_CTRL_2+1) /* GPIO pins output value */
-#define GPIO_IO_SEL_8811A (REG_GPIO_PIN_CTRL_2+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD_8811A (REG_GPIO_PIN_CTRL_2+3)
-
-/* */
-/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
-/* */
-#define HSIMR_GPIO12_0_INT_EN BIT0
-#define HSIMR_SPS_OCP_INT_EN BIT5
-#define HSIMR_RON_INT_EN BIT6
-#define HSIMR_PDN_INT_EN BIT7
-#define HSIMR_GPIO9_INT_EN BIT25
-
-/* */
/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
/* */
#define HSISR_GPIO12_0_INT BIT0
@@ -573,22 +528,6 @@
#define HSISR_GPIO9_INT BIT25
/* */
-/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-/* */
-/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
-/* */
/* USB INTR CONTENT */
/* */
#define USB_C2H_CMDID_OFFSET 0
@@ -787,206 +726,6 @@ Default: 00b.
#define IMR_WLANOFF BIT0
/* */
-/* 8723E series PCIE Host IMR/ISR bit */
-/* */
-/* IMR DW0 Bit 0-31 */
-#define PHIMR_TIMEOUT2 BIT31
-#define PHIMR_TIMEOUT1 BIT30
-#define PHIMR_PSTIMEOUT BIT29
-#define PHIMR_GTINT4 BIT28
-#define PHIMR_GTINT3 BIT27
-#define PHIMR_TXBCNERR BIT26
-#define PHIMR_TXBCNOK BIT25
-#define PHIMR_TSF_BIT32_TOGGLE BIT24
-#define PHIMR_BCNDMAINT3 BIT23
-#define PHIMR_BCNDMAINT2 BIT22
-#define PHIMR_BCNDMAINT1 BIT21
-#define PHIMR_BCNDMAINT0 BIT20
-#define PHIMR_BCNDOK3 BIT19
-#define PHIMR_BCNDOK2 BIT18
-#define PHIMR_BCNDOK1 BIT17
-#define PHIMR_BCNDOK0 BIT16
-#define PHIMR_HSISR_IND_ON BIT15
-#define PHIMR_BCNDMAINT_E BIT14
-#define PHIMR_ATIMEND_E BIT13
-#define PHIMR_ATIM_CTW_END BIT12
-#define PHIMR_HISRE_IND BIT11 /* RO. HISRE Indicator (HISRE & HIMRE is true, this bit is set to 1) */
-#define PHIMR_C2HCMD BIT10
-#define PHIMR_CPWM2 BIT9
-#define PHIMR_CPWM BIT8
-#define PHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
-#define PHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
-#define PHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
-#define PHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
-#define PHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
-#define PHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
-#define PHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
-#define PHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
-
-/* PCIE Host Interrupt Status Extension bit */
-#define PHIMR_BCNDMAINT7 BIT23
-#define PHIMR_BCNDMAINT6 BIT22
-#define PHIMR_BCNDMAINT5 BIT21
-#define PHIMR_BCNDMAINT4 BIT20
-#define PHIMR_BCNDOK7 BIT19
-#define PHIMR_BCNDOK6 BIT18
-#define PHIMR_BCNDOK5 BIT17
-#define PHIMR_BCNDOK4 BIT16
-/* bit12 15: RSVD */
-#define PHIMR_TXERR BIT11
-#define PHIMR_RXERR BIT10
-#define PHIMR_TXFOVW BIT9
-#define PHIMR_RXFOVW BIT8
-/* bit2-7: RSVD */
-#define PHIMR_OCPINT BIT1
-/* bit0: RSVD */
-
-#define UHIMR_TIMEOUT2 BIT31
-#define UHIMR_TIMEOUT1 BIT30
-#define UHIMR_PSTIMEOUT BIT29
-#define UHIMR_GTINT4 BIT28
-#define UHIMR_GTINT3 BIT27
-#define UHIMR_TXBCNERR BIT26
-#define UHIMR_TXBCNOK BIT25
-#define UHIMR_TSF_BIT32_TOGGLE BIT24
-#define UHIMR_BCNDMAINT3 BIT23
-#define UHIMR_BCNDMAINT2 BIT22
-#define UHIMR_BCNDMAINT1 BIT21
-#define UHIMR_BCNDMAINT0 BIT20
-#define UHIMR_BCNDOK3 BIT19
-#define UHIMR_BCNDOK2 BIT18
-#define UHIMR_BCNDOK1 BIT17
-#define UHIMR_BCNDOK0 BIT16
-#define UHIMR_HSISR_IND BIT15
-#define UHIMR_BCNDMAINT_E BIT14
-/* RSVD BIT13 */
-#define UHIMR_CTW_END BIT12
-/* RSVD BIT11 */
-#define UHIMR_C2HCMD BIT10
-#define UHIMR_CPWM2 BIT9
-#define UHIMR_CPWM BIT8
-#define UHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
-#define UHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
-#define UHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
-#define UHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
-#define UHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
-#define UHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
-#define UHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
-#define UHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
-
-/* USB Host Interrupt Status Extension bit */
-#define UHIMR_BCNDMAINT7 BIT23
-#define UHIMR_BCNDMAINT6 BIT22
-#define UHIMR_BCNDMAINT5 BIT21
-#define UHIMR_BCNDMAINT4 BIT20
-#define UHIMR_BCNDOK7 BIT19
-#define UHIMR_BCNDOK6 BIT18
-#define UHIMR_BCNDOK5 BIT17
-#define UHIMR_BCNDOK4 BIT16
-/* bit14-15: RSVD */
-#define UHIMR_ATIMEND_E BIT13
-#define UHIMR_ATIMEND BIT12
-#define UHIMR_TXERR BIT11
-#define UHIMR_RXERR BIT10
-#define UHIMR_TXFOVW BIT9
-#define UHIMR_RXFOVW BIT8
-/* bit2-7: RSVD */
-#define UHIMR_OCPINT BIT1
-/* bit0: RSVD */
-
-
-#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
-#define HAL_NIC_UNPLUG_PCI_ISR 0xEAEAEAEA /* The value when the NIC is unplugged for PCI in PCI interrupt (page 3). */
-
-/* */
-/* 8188 IMR/ISR bits */
-/* */
-#define IMR_DISABLED_88E 0x0
-/* IMR DW0(0x0060-0063) Bit 0-31 */
-#define IMR_TXCCK_88E BIT30 /* TXRPT interrupt when CCX bit of the packet is set */
-#define IMR_PSTIMEOUT_88E BIT29 /* Power Save Time Out Interrupt */
-#define IMR_GTINT4_88E BIT28 /* When GTIMER4 expires, this bit is set to 1 */
-#define IMR_GTINT3_88E BIT27 /* When GTIMER3 expires, this bit is set to 1 */
-#define IMR_TBDER_88E BIT26 /* Transmit Beacon0 Error */
-#define IMR_TBDOK_88E BIT25 /* Transmit Beacon0 OK */
-#define IMR_TSF_BIT32_TOGGLE_88E BIT24 /* TSF Timer BIT32 toggle indication interrupt */
-#define IMR_BCNDMAINT0_88E BIT20 /* Beacon DMA Interrupt 0 */
-#define IMR_BCNDERR0_88E BIT16 /* Beacon Queue DMA Error 0 */
-#define IMR_HSISR_IND_ON_INT_88E BIT15 /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
-#define IMR_BCNDMAINT_E_88E BIT14 /* Beacon DMA Interrupt Extension for Win7 */
-#define IMR_ATIMEND_88E BIT12 /* CTWidnow End or ATIM Window End */
-#define IMR_HISR1_IND_INT_88E BIT11 /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
-#define IMR_C2HCMD_88E BIT10 /* CPU to Host Command INT Status, Write 1 clear */
-#define IMR_CPWM2_88E BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_CPWM_88E BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_HIGHDOK_88E BIT7 /* High Queue DMA OK */
-#define IMR_MGNTDOK_88E BIT6 /* Management Queue DMA OK */
-#define IMR_BKDOK_88E BIT5 /* AC_BK DMA OK */
-#define IMR_BEDOK_88E BIT4 /* AC_BE DMA OK */
-#define IMR_VIDOK_88E BIT3 /* AC_VI DMA OK */
-#define IMR_VODOK_88E BIT2 /* AC_VO DMA OK */
-#define IMR_RDU_88E BIT1 /* Rx Descriptor Unavailable */
-#define IMR_ROK_88E BIT0 /* Receive DMA OK */
-
-/* IMR DW1(0x00B4-00B7) Bit 0-31 */
-#define IMR_BCNDMAINT7_88E BIT27 /* Beacon DMA Interrupt 7 */
-#define IMR_BCNDMAINT6_88E BIT26 /* Beacon DMA Interrupt 6 */
-#define IMR_BCNDMAINT5_88E BIT25 /* Beacon DMA Interrupt 5 */
-#define IMR_BCNDMAINT4_88E BIT24 /* Beacon DMA Interrupt 4 */
-#define IMR_BCNDMAINT3_88E BIT23 /* Beacon DMA Interrupt 3 */
-#define IMR_BCNDMAINT2_88E BIT22 /* Beacon DMA Interrupt 2 */
-#define IMR_BCNDMAINT1_88E BIT21 /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK7_88E BIT20 /* Beacon Queue DMA OK Interrupt 7 */
-#define IMR_BCNDOK6_88E BIT19 /* Beacon Queue DMA OK Interrupt 6 */
-#define IMR_BCNDOK5_88E BIT18 /* Beacon Queue DMA OK Interrupt 5 */
-#define IMR_BCNDOK4_88E BIT17 /* Beacon Queue DMA OK Interrupt 4 */
-#define IMR_BCNDOK3_88E BIT16 /* Beacon Queue DMA OK Interrupt 3 */
-#define IMR_BCNDOK2_88E BIT15 /* Beacon Queue DMA OK Interrupt 2 */
-#define IMR_BCNDOK1_88E BIT14 /* Beacon Queue DMA OK Interrupt 1 */
-#define IMR_ATIMEND_E_88E BIT13 /* ATIM Window End Extension for Win7 */
-#define IMR_TXERR_88E BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
-#define IMR_RXERR_88E BIT10 /* Rx Error Flag INT Status, Write 1 clear */
-#define IMR_TXFOVW_88E BIT9 /* Transmit FIFO Overflow */
-#define IMR_RXFOVW_88E BIT8 /* Receive FIFO Overflow */
-
-/*===================================================================
-=====================================================================
-Here the register defines are for 92C. When the define is as same with 92C,
-we will use the 92C's define for the consistency
-So the following defines for 92C is not entire!!!!!!
-=====================================================================
-=====================================================================*/
-/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
-*/
- /* */
- /* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
- /* */
-/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet RTL8192S/RTL8192C are wrong, */
-/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2, and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. Confirmed with DD Tim. */
-/* By Bruce, 2011-09-22. */
-#define StopBecon BIT6
-#define StopHigh BIT5
-#define StopMgt BIT4
-#define StopBK BIT3
-#define StopBE BIT2
-#define StopVI BIT1
-#define StopVO BIT0
-
-/* */
/* 8192C (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
/* */
#define RCR_APPFCS BIT31 /* WMAC append FCS after pauload */
@@ -1557,10 +1296,6 @@ Current IOREG MAP
#define SDIO_HIMR_ATIMEND_E_MSK BIT26
#define SDIO_HIMR_CTWEND_MSK BIT27
-/* RTL8188E SDIO Specific */
-#define SDIO_HIMR_MCU_ERR_MSK BIT28
-#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT29
-
/* SDIO Host Interrupt Service Routine */
#define SDIO_HISR_RX_REQUEST BIT0
#define SDIO_HISR_AVAL BIT1
@@ -1583,10 +1318,6 @@ Current IOREG MAP
#define SDIO_HISR_ATIMEND_E BIT26
#define SDIO_HISR_CTWEND BIT27
-/* RTL8188E SDIO Specific */
-#define SDIO_HISR_MCU_ERR BIT28
-#define SDIO_HISR_TSF_BIT32_TOGGLE BIT29
-
#define MASK_SDIO_HISR_CLEAR (SDIO_HISR_TXERR |\
SDIO_HISR_RXERR |\
SDIO_HISR_TXFOVW |\
@@ -1651,39 +1382,13 @@ Current IOREG MAP
#define GPS_HWPDN_SL BIT21 /* GPS HW PDn polarity control */
#define GPS_FUNC_EN BIT22 /* GPS function enable */
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
-#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
-#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
-#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us, said by Tim. */
-
-/* 2 8192D PartNo. */
-#define PARTNO_92D_NIC (BIT7|BIT6)
-#define PARTNO_92D_NIC_REMARK (BIT5|BIT4)
-#define PARTNO_SINGLE_BAND_VS BIT3
-#define PARTNO_SINGLE_BAND_VS_REMARK BIT1
-#define PARTNO_CONCURRENT_BAND_VC (BIT3|BIT2)
-#define PARTNO_CONCURRENT_BAND_VC_REMARK (BIT1|BIT0)
-
/* */
/* General definitions */
/* */
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8188E 176
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8812 255
#define LAST_ENTRY_OF_TX_PKT_BUFFER_8723B 255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8192C 255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-
-/* GPIO BIT */
-#define HAL_8192C_HW_GPIO_WPS_BIT BIT2
-#define HAL_8192EU_HW_GPIO_WPS_BIT BIT7
-#define HAL_8188E_HW_GPIO_WPS_BIT BIT7
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl.h b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
deleted file mode 100644
index 7179591cb01d..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_ioctl.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_IOCTL_H_
-#define _RTW_IOCTL_H_
-
-/* 00 - Success */
-/* 11 - Error */
-#define STATUS_SUCCESS (0x00000000L)
-#define STATUS_PENDING (0x00000103L)
-
-#define STATUS_UNSUCCESSFUL (0xC0000001L)
-#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
-#define STATUS_NOT_SUPPORTED (0xC00000BBL)
-
-#define NDIS_STATUS_SUCCESS ((uint)STATUS_SUCCESS)
-#define NDIS_STATUS_PENDING ((uint)STATUS_PENDING)
-#define NDIS_STATUS_NOT_RECOGNIZED ((uint)0x00010001L)
-#define NDIS_STATUS_NOT_COPIED ((uint)0x00010002L)
-#define NDIS_STATUS_NOT_ACCEPTED ((uint)0x00010003L)
-#define NDIS_STATUS_CALL_ACTIVE ((uint)0x00010007L)
-
-#define NDIS_STATUS_FAILURE ((uint)STATUS_UNSUCCESSFUL)
-#define NDIS_STATUS_RESOURCES ((uint)STATUS_INSUFFICIENT_RESOURCES)
-#define NDIS_STATUS_CLOSING ((uint)0xC0010002L)
-#define NDIS_STATUS_BAD_VERSION ((uint)0xC0010004L)
-#define NDIS_STATUS_BAD_CHARACTERISTICS ((uint)0xC0010005L)
-#define NDIS_STATUS_ADAPTER_NOT_FOUND ((uint)0xC0010006L)
-#define NDIS_STATUS_OPEN_FAILED ((uint)0xC0010007L)
-#define NDIS_STATUS_DEVICE_FAILED ((uint)0xC0010008L)
-#define NDIS_STATUS_MULTICAST_FULL ((uint)0xC0010009L)
-#define NDIS_STATUS_MULTICAST_EXISTS ((uint)0xC001000AL)
-#define NDIS_STATUS_MULTICAST_NOT_FOUND ((uint)0xC001000BL)
-#define NDIS_STATUS_REQUEST_ABORTED ((uint)0xC001000CL)
-#define NDIS_STATUS_RESET_IN_PROGRESS ((uint)0xC001000DL)
-#define NDIS_STATUS_CLOSING_INDICATING ((uint)0xC001000EL)
-#define NDIS_STATUS_NOT_SUPPORTED ((uint)STATUS_NOT_SUPPORTED)
-#define NDIS_STATUS_INVALID_PACKET ((uint)0xC001000FL)
-#define NDIS_STATUS_OPEN_LIST_FULL ((uint)0xC0010010L)
-#define NDIS_STATUS_ADAPTER_NOT_READY ((uint)0xC0010011L)
-#define NDIS_STATUS_ADAPTER_NOT_OPEN ((uint)0xC0010012L)
-#define NDIS_STATUS_NOT_INDICATING ((uint)0xC0010013L)
-#define NDIS_STATUS_INVALID_LENGTH ((uint)0xC0010014L)
-#define NDIS_STATUS_INVALID_DATA ((uint)0xC0010015L)
-#define NDIS_STATUS_BUFFER_TOO_SHORT ((uint)0xC0010016L)
-#define NDIS_STATUS_INVALID_OID ((uint)0xC0010017L)
-#define NDIS_STATUS_ADAPTER_REMOVED ((uint)0xC0010018L)
-#define NDIS_STATUS_UNSUPPORTED_MEDIA ((uint)0xC0010019L)
-#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((uint)0xC001001AL)
-#define NDIS_STATUS_FILE_NOT_FOUND ((uint)0xC001001BL)
-#define NDIS_STATUS_ERROR_READING_FILE ((uint)0xC001001CL)
-#define NDIS_STATUS_ALREADY_MAPPED ((uint)0xC001001DL)
-#define NDIS_STATUS_RESOURCE_CONFLICT ((uint)0xC001001EL)
-#define NDIS_STATUS_NO_CABLE ((uint)0xC001001FL)
-
-#define NDIS_STATUS_INVALID_SAP ((uint)0xC0010020L)
-#define NDIS_STATUS_SAP_IN_USE ((uint)0xC0010021L)
-#define NDIS_STATUS_INVALID_ADDRESS ((uint)0xC0010022L)
-#define NDIS_STATUS_VC_NOT_ACTIVATED ((uint)0xC0010023L)
-#define NDIS_STATUS_DEST_OUT_OF_ORDER ((uint)0xC0010024L) /* cause 27 */
-#define NDIS_STATUS_VC_NOT_AVAILABLE ((uint)0xC0010025L) /* cause 35, 45 */
-#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((uint)0xC0010026L) /* cause 37 */
-#define NDIS_STATUS_INCOMPATABLE_QOS ((uint)0xC0010027L) /* cause 49 */
-#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((uint)0xC0010028L) /* cause 93 */
-#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((uint)0xC0010029L) /* cause 3 */
-
-extern struct iw_handler_def rtw_handlers_def;
-
-#endif /* #ifndef __INC_CEINFO_ */
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 757efeb49d08..380d8c9e1239 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -389,7 +389,7 @@ static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state
if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
return NOTIFY_DONE;
- netdev_info(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
+ netdev_dbg(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
state);
return NOTIFY_DONE;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 805dc18fac0a..d5ad49de4c56 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -55,9 +55,9 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
*offset += cnt;
/*
- * Using scatter-gather. We have to go through the list one entry
- * at a time. Each s-g entry contains some number of pages, and
- * each page has to be kmap()'ed separately.
+ * Using scatter-gather. We have to go through the list one entry
+ * at a time. Each s-g entry contains some number of pages which
+ * have to be copied one at a time.
*/
} else {
struct scatterlist *sg =
@@ -92,13 +92,11 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
while (sglen > 0) {
unsigned int plen = min(sglen, (unsigned int)
PAGE_SIZE - poff);
- unsigned char *ptr = kmap(page);
if (dir == TO_XFER_BUF)
- memcpy(ptr + poff, buffer + cnt, plen);
+ memcpy_to_page(page, poff, buffer + cnt, plen);
else
- memcpy(buffer + cnt, ptr + poff, plen);
- kunmap(page);
+ memcpy_from_page(buffer + cnt, page, poff, plen);
/* Start at the beginning of the next page */
poff = 0;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index a7c6eb07b62e..55cb00e8b0d1 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -81,6 +81,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->pvMem =
ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size);
if (!sm750_dev->pvMem) {
+ iounmap(sm750_dev->pvReg);
pr_err("Map video memory failed\n");
ret = -EFAULT;
goto exit;
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
deleted file mode 100644
index c2359de17eaf..000000000000
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ /dev/null
@@ -1,89 +0,0 @@
-This file describes sysfs entries beneath /devices/platform/visorchipset.
-
-What: install/error
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to send the ID of a string that should be displayed on
- s-Par's automatic installation progress screen when an error
- is encountered during installation. This field has no effect
- if not in installation mode.
-Users: sparmaintainer@unisys.com
-
-What: install/remainingsteps
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to set the value of the progress bar on the s-Par automatic
- installation progress screen. This field has no effect if not in
- installation mode.
-Users: sparmaintainer@unisys.com
-
-What: install/textid
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to send the ID of a string that should be displayed on
- s-Par's automatic installation progress screen. Setting this
- field when not in installation mode (boottotool was set on
- the previous guest boot) has no effect.
-Users: sparmaintainer@unisys.com
-
-What: install/boottotool
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: The boottotool flag controls s-Par behavior on the next boot of
- this guest. Setting the flag will cause the guest to boot from
- the utility and installation image, which will use the value in
- the toolaction field to determine what operation is being
- requested.
-Users: sparmaintainer@unisys.com
-
-What: install/toolaction
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This field is used to tell s-Par which type of recovery tool
- action to perform on the next guest boot-up. The meaning of the
- value is dependent on the type of installation software used to
- commission the guest.
-Users: sparmaintainer@unisys.com
-
-What: parahotplug/deviceenabled
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by a Unisys support script installed on the
- guest, and triggered by a udev event. The support script is
- responsible for enabling and disabling SR-IOV devices when the
- PF device is being recovered in another guest.
-
- Some SR-IOV devices have problems when the PF is reset without
- first disabling all VFs attached to that PF. s-Par handles this
- situation by sending a message to guests using these VFs, and
- the script will disable the device. When the PF is recovered,
- another message is sent to the guests to re-enable the VFs.
-
- The parahotplug/deviceenabled interface is used to acknowledge
- the recovery message.
-Users: sparmaintainer@unisys.com
-
-What: parahotplug/devicedisabled
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by a Unisys support script installed on the
- guest, and triggered by a udev event. The support script is
- responsible for enabling and disabling SR-IOV devices when the
- PF device is being recovered in another guest.
-
- Some SR-IOV devices have problems when the PF is reset without
- first disabling all VFs attached to that PF. s-Par handles this
- situation by sending a message to guests using these VFs, and
- the script will disable the device. When the PF is recovered,
- another message is sent to the guests to re-enable the VFs.
-
- The parahotplug/devicedisaabled interface is used to acknowledge
- the initial recovery message.
-Users: sparmaintainer@unisys.com
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
deleted file mode 100644
index cf29f884cbe0..000000000000
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ /dev/null
@@ -1,337 +0,0 @@
-1. Overview
------------
-
-This document describes the driver set for Unisys Secure Partitioning
-(s-Par(R)).
-
-s-Par is firmware that provides hardware partitioning capabilities for
-splitting large-scale Intel x86 servers into multiple isolated
-partitions. s-Par provides a set of para-virtualized device drivers to
-allow guest partitions on the same server to share devices that would
-normally be unsharable, specifically:
-
-* visornic - network interface
-* visorhba - scsi disk adapter
-* visorinput - keyboard and mouse
-
-These drivers conform to the standard Linux bus/device model described
-within Documentation/driver-api/driver-model/, and utilize a driver named
-visorbus to present the virtual busses involved. Drivers in the 'visor*'
-driver set are commonly referred to as "guest drivers" or "client drivers".
-All drivers except visorbus expose a device of a specific usable class to the
-Linux guest environment (e.g., block, network, or input), and are collectively
-referred to as "function drivers".
-
-The back-end for each device is owned and managed by a small,
-single-purpose service partition in the s-Par firmware, which communicates
-with each guest partition sharing that device through an area of shared memory
-called a "channel". In s-Par nomenclature, the back-end is often referred to
-as the "service partition", "IO partition" (for virtual network and scsi disk
-devices), or "console partition" (for virtual keyboard and mouse devices).
-
-Each virtual device requires exactly 1 dedicated channel, which the guest
-driver and back-end use to communicate. The hypervisor need not intervene
-(other than normal interrupt handling) in the interactions that occur across
-this channel.
-
-NOT covered in this document:
-
-* s-Par also supports sharing physical PCI adapters via SR-IOV, but
- because this requires no specific support in the guest partitions, it will
- not be discussed in this document. Shared SR-IOV devices should be used
- wherever possible for highest performance.
-
-* Because the s-Par back-end provides a standard EFI framebuffer to each
- guest, the already-existing efifb Linux driver is used to provide guest
- video access. Thus, the only s-Par-unique support that is necessary to
- provide a guest graphics console are for keyboard and mouse (via visorinput).
-  provide a guest graphics console is for keyboard and mouse (via visorinput).
-
-2. Driver Descriptions
-----------------------
-
-2.1. visorbus
--------------
-
-2.1.1. Overview
----------------
-
-The visorbus driver handles the virtual busses on which all of the virtual
-devices reside. It provides a registration function named
-visorbus_register_visor_driver() that is called by each of the function
-drivers at initialization time, which the function driver uses to tell
-visorbus about the device classes (via specifying a list of device type
-GUIDs) it wants to handle. For use by function drivers, visorbus provides
-implementation for struct visor_driver and struct visor_device, as well
-as utility functions for communicating with the back-end.
-
-visorbus is associated with ACPI id "PNP0A07" in modules.alias, so if built
-as a module it will typically be loaded automatically via standard udev or
-systemd (God help us) configurations.
-
-visorbus can similarly force auto-loading of function drivers for virtual
-devices it discovers, as it includes a MODALIAS environment variable of this
-form in the hotplug uevent environment when each virtual device is
-discovered:
-
- visorbus:<device type GUID>
-
-visorbus notifies each function driver when a device of its registered class
-arrives and departs, by calling the function driver's probe() and remove()
-methods.
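-
-As a minimal sketch of that registration flow (EXAMPLE_CHANNEL_GUID, the
-callback bodies, and the boilerplate fields below are illustrative
-placeholders, not a real driver), a function driver generally takes this
-shape:
-
-	static struct visor_channeltype_descriptor example_channel_types[] = {
-		/* EXAMPLE_CHANNEL_GUID stands in for a real channel-type GUID */
-		{ EXAMPLE_CHANNEL_GUID, "example", sizeof(struct channel_header), 1 },
-		{}
-	};
-
-	static int example_probe(struct visor_device *dev)
-	{
-		/* a device/channel of our registered type has appeared */
-		return 0;
-	}
-
-	static void example_remove(struct visor_device *dev)
-	{
-		/* the device/channel is going away */
-	}
-
-	static void example_channel_interrupt(struct visor_device *dev)
-	{
-		/* poll the device channel for activity */
-	}
-
-	static struct visor_driver example_driver = {
-		.name = "example",
-		.owner = THIS_MODULE,
-		.channel_types = example_channel_types,
-		.probe = example_probe,
-		.remove = example_remove,
-		.channel_interrupt = example_channel_interrupt,
-	};
-
-	/* typically called from the module's init function; check the return value */
-	visorbus_register_visor_driver(&example_driver);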
-
-The actual struct device objects that correspond to each virtual bus and
-each virtual device are created and owned by visorbus. These device objects
-are created in response to messages from the s-Par back-end received on a
-special control channel called the "controlvm channel" (each guest partition
-has access to exactly 1 controlvm channel), and have a lifetime that is
-independent of the function drivers that control them.
-
-2.1.2. "struct visor device" Function Driver Interfaces
--------------------------------------------------------
-
-The interface between visorbus and its function drivers is defined in
-visorbus.h, and described below.
-
-When a visor function driver loads, it calls visorbus_register_visor_driver()
-to register itself with visorbus. The significant information passed in this
-exchange is as follows:
-
-* the GUID(s) of the channel type(s) that are handled by this driver, as
- well as a "friendly name" identifying each (this will be published under
- /sys/devices/visorbus<x>/dev<y>)
-
-* the addresses of callback functions to be called whenever a virtual
- device/channel with the appropriate channel-type GUID(s) appears or
- disappears
-
-* the address of a "channel_interrupt" function, which will be automatically
- called at specific intervals to enable the driver to poll the device
- channel for activity
-
-The following functions implemented within each function driver will be
-called automatically by the visorbus driver at appropriate times:
-
-* The probe() function notifies about the creation of each new virtual
- device/channel instance.
-
-* The remove() function notifies about the destruction of a virtual
- device/channel instance.
-
-* The channel_interrupt() function is called at frequent intervals to
- give the function driver an opportunity to poll the virtual device channel
- for requests. Information is passed to this function to enable the
- function driver to use the visorchannel_signalinsert() and
- visorchannel_signalremove() functions to respond to and initiate activity
- over the channel. (Note that since it is the visorbus driver that
- determines when this is called, it is very easy to switch to
- interrupt-driven mechanisms when available for particular virtual device
- types.)
-
-* The pause() function is called should it ever be necessary to direct the
- function driver to temporarily stop accessing the device channel. An
- example of when this is needed is when the service partition implementing
- the back-end of the virtual device needs to be recovered. After a
- successful return of pause(), the function driver must not access the
- device channel until a subsequent resume() occurs.
-
-* The resume() function is the "book-end" to pause(), and is described above.
-
-2.1.3. sysfs Advertised Information
------------------------------------
-
-Because visorbus is a standard Linux bus driver in the model described in
-Documentation/driver-api/driver-model/, the hierarchy of s-Par virtual devices is
-published in the sysfs tree beneath /bus/visorbus/, e.g.,
-/sys/bus/visorbus/devices/ might look like:
-
- vbus1:dev1 -> ../../../devices/visorbus1/vbus1:dev1
- vbus1:dev2 -> ../../../devices/visorbus1/vbus1:dev2
- vbus1:dev3 -> ../../../devices/visorbus1/vbus1:dev3
- vbus2:dev0 -> ../../../devices/visorbus2/vbus2:dev0
- vbus2:dev1 -> ../../../devices/visorbus2/vbus2:dev1
- vbus2:dev2 -> ../../../devices/visorbus2/vbus2:dev2
- visorbus1 -> ../../../devices/visorbus1
- visorbus2 -> ../../../devices/visorbus2
-
-visor_device notes:
-
-* Each visorbus<n> entry denotes the existence of a struct visor_device
- denoting virtual bus #<n>. A unique s-Par channel exists for each such
- virtual bus.
-
-* Virtual bus numbers uniquely identify s-Par back-end service partitions.
- In this example, bus 1 corresponds to the s-Par console partition
- (controls keyboard, video, and mouse), whereas bus 2 corresponds to the
- s-Par IO partition (controls network and disk).
-
-* Each vbus<x>:dev<y> entry denotes the existence of a struct visor_device
- denoting virtual device #<y> outboard of virtual bus #<x>. A unique s-Par
- channel exists for each such virtual device.
-
-* If a function driver has loaded and claimed a particular device, the
- bus/visorbus/devices/vbus<x>:dev<y>/driver symlink will indicate that
- function driver.
-
-Every active visorbus device will have a sysfs subtree under:
-
- /sys/devices/visorbus<x>/vbus<x>:dev<y>/
-
-The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
-
- subsystem link to sysfs tree that describes the
- visorbus bus type; e.g.:
- ../../../bus/visorbus
-
- driver link to sysfs tree that describes the
- function driver controlling this device;
- e.g.:
- ../../../bus/visorbus/drivers/visorhba
- Note that this "driver" link will not exist
- if the appropriate function driver has not
- been loaded yet.
-
- channel properties of the device channel (all in
- ascii text format)
-
- clientpartition handle identifying the guest (client) side
- of this channel, e.g. 0x10000000.
-
- nbytes total size of this channel in bytes
-
- physaddr the guest physical address for the base of
- the channel
-
- typeguid a GUID identifying the channel type, in
- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
- typename a "friendly name" for this channel type, e.g.,
- "keyboard". Note that this name is provided by
- a particular function driver, so "typename"
- will return an empty string until AFTER the
- appropriate function driver controlling this
- channel type is loaded
-
- zoneguid a GUID identifying the channel zone, in
- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
-
-2.2. visorhba
--------------
-
-The visorhba driver registers with visorbus as the function driver to
-handle virtual scsi disk devices, specified using the
-VISOR_VHBA_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visorhba uses scsi_add_host() to expose a Linux block device
-(e.g., /sys/block/) in the guest environment for each s-Par virtual device.
-
-visorhba provides access to a shared SCSI host bus adapter and one or more
-disk devices, by proxying SCSI commands between the guest and the service
-partition that owns the shared SCSI adapter, using a channel between the
-guest and the service partition. The disks that appear on the shared bus
-are defined by the s-Par configuration and enforced by the service partition,
-while the guest driver handles sending commands and handling responses. Each
-disk is shared as a whole to a guest. Sharing the bus adapter in this way
-provides resiliency; should the device encounter an error, only the service
-partition is rebooted, and the device is reinitialized. This allows
-guests to continue running and to recover from the error.
-
-When compiled as a module, visorhba can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_VHBA_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:414815ed-c58c-11da-95a9-00e08161165f visorhba
-
-
-2.3. visornic
--------------
-
-The visornic driver registers with visorbus as the function driver to
-handle virtual network devices, specified using the
-VISOR_VNIC_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visornic uses register_netdev() to expose a Linux device of class net
-(e.g., /sys/class/net/) in the guest environment for each s-Par virtual
-device.
-
-visornic provides a paravirtualized network interface to a
-guest by proxying buffer information between the guest and the service
-partition that owns the shared network interface, using a channel
-between the guest and the service partition. The connectivity of this
-interface with the shared interface and possibly other guest
-partitions is defined by the s-Par configuration and enforced by the
-service partition; the guest driver handles communication and link
-status.
-
-When compiled as a module, visornic can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_VNIC_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:8cd5994d-c58e-11da-95a9-00e08161165f visornic
-
-
-2.4. visorinput
----------------
-
-The visorinput driver registers with visorbus as the function driver to
-handle human input devices, specified using the
-VISOR_KEYBOARD_CHANNEL_GUID and VISOR_MOUSE_CHANNEL_GUID
-types in the visorbus_register_visor_driver() call. visorinput uses
-input_register_device() to expose devices of class input
-(e.g., /sys/class/input/) for virtual keyboard and virtual mouse devices.
-An s-Par virtual keyboard device maps 1-to-1 with a Linux input device
-named "visor Keyboard", while an s-Par virtual mouse device has 2 Linux input
-devices created for it: 1 named "visor Wheel", and 1 named "visor Mouse".
-
-By registering as input class devices, modern versions of X will
-automatically find and properly use s-Par virtual keyboard and mouse devices.
-As the s-Par back-end reports keyboard and mouse activity via events on the
-virtual device channel, the visorinput driver delivers the activity to the
-Linux environment by calling input_report_key() and input_report_abs().
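-
-As a minimal sketch of that reporting step (visor_input_dev below is a
-hypothetical, already-registered struct input_dev, not an identifier from the
-driver), delivering a key press and an absolute pointer position looks like:
-
-	input_report_key(visor_input_dev, KEY_A, 1);	/* key down */
-	input_report_key(visor_input_dev, KEY_A, 0);	/* key up */
-	input_report_abs(visor_input_dev, ABS_X, x);
-	input_report_abs(visor_input_dev, ABS_Y, y);
-	input_sync(visor_input_dev);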
-
-You can interact with the guest console using the usyscon Partition Desktop
-(a.k.a., "pd") application, provided as part of s-Par. After installing the
-usyscon Partition Desktop into a Linux environment via the
-usyscon_partitiondesktop-*.rpm, or into a Windows environment via
-PartitionDesktop.msi, you will be able to launch a console for your guest
-Linux environment by clicking the console icon in the s-Par web UI.
-
-When compiled as a module, visorinput can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_MOUSE_CHANNEL_GUID_STR
- "visorbus:"+VISOR_KEYBOARD_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:c73416d0-b0b8-44af-b304-9d2ae99f1b3d visorinput
- alias visorbus:addf07d4-94a9-46e2-81c3-61abcdbdbd87 visorinput
-
-
-3. Minimum Required Driver Set
-------------------------------
-
-visorbus is required for every Linux guest running under s-Par.
-
-visorhba is typically required for a Linux guest running under s-Par, as it
-is required if your guest boot disk is a virtual device provided by the s-Par
-back-end, which is the default configuration. However, for advanced
-configurations where the Linux guest boots via an SR-IOV-provided HBA or
-SAN disk for example, visorhba is not technically required.
-
-visornic is typically required for a Linux guest running under s-Par, as it
-is required if your guest network interface is a virtual device provided by
-the s-Par back-end, which is the default configuration. However, for
-configurations where the Linux guest is provided with an SR-IOV NIC
-for example, visornic is not technically required.
-
-visorinput is only required for a Linux guest running under s-Par if you
-require graphics-mode access to your guest console.
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
deleted file mode 100644
index 43fe1ce538e1..000000000000
--- a/drivers/staging/unisys/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys SPAR driver configuration
-#
-menuconfig UNISYSSPAR
- bool "Unisys SPAR driver support"
- help
- Support for the Unisys SPAR drivers
-
-if UNISYSSPAR
-
-source "drivers/staging/unisys/visornic/Kconfig"
-source "drivers/staging/unisys/visorinput/Kconfig"
-source "drivers/staging/unisys/visorhba/Kconfig"
-
-endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
deleted file mode 100644
index aaddc619c329..000000000000
--- a/drivers/staging/unisys/MAINTAINERS
+++ /dev/null
@@ -1,5 +0,0 @@
-Unisys s-Par drivers
-M: David Kershner <sparmaintainer@unisys.com>
-S: Maintained
-F: drivers/staging/unisys/Documentation/overview.txt
-F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
deleted file mode 100644
index c0f76cc196a6..000000000000
--- a/drivers/staging/unisys/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys SPAR drivers
-#
-obj-$(CONFIG_UNISYS_VISORNIC) += visornic/
-obj-$(CONFIG_UNISYS_VISORINPUT) += visorinput/
-obj-$(CONFIG_UNISYS_VISORHBA) += visorhba/
diff --git a/drivers/staging/unisys/TODO b/drivers/staging/unisys/TODO
deleted file mode 100644
index d863f266bf76..000000000000
--- a/drivers/staging/unisys/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-TODO:
- - enhance visornic to use channel_interrupt() hook instead of a
- kernel thread
- - enhance visorhba to use channel_interrupt() hook instead of a
- kernel thread
- - teach visorbus to handle virtual interrupts triggered by s-Par
- back-end, and call function driver's channel_interrupt() function
- when they occur
- - enhance debugfs interfaces (e.g., per device, etc.)
- - upgrade/remove deprecated workqueue operations
- - move individual drivers into proper driver subsystems
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Ken Cox <jkc@redhat.com>
- Unisys s-Par maintainer mailing list <sparmaintainer@unisys.com>
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
deleted file mode 100644
index 9ef812c0bc42..000000000000
--- a/drivers/staging/unisys/include/iochannel.h
+++ /dev/null
@@ -1,571 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2010 - 2016 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __IOCHANNEL_H__
-#define __IOCHANNEL_H__
-
-/*
- * Everything needed for IOPart-GuestPart communication is defined in
- * this file. Note: Everything is OS-independent because this file is
- * used by Windows, Linux and possibly EFI drivers.
- *
- * Communication flow between the IOPart and GuestPart uses the channel
- * header's channel state. The following states are currently being used:
- * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
- *
- * Additional states will be used later. No locking is needed to switch between
- * states due to the following rules:
- *
- * 1. IOPart is the only partition allowed to change from UNINIT
- * 2. IOPart is the only partition allowed to change from
- *    CHANNEL_ATTACHING
- * 3. GuestPart is the only partition allowed to change from
- *    CHANNEL_ATTACHED
- *
- * The state changes are the following: IOPart sees the channel is in UNINIT,
- * UNINIT -> CHANNEL_ATTACHING (performed only by IOPart)
- * CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart)
- * CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart)
- */
-
-#include <linux/uuid.h>
-#include <linux/skbuff.h>
-#include <linux/visorbus.h>
-
-/*
- * Must increment these whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct in a way that breaks pre-existing software. Note that you can
- * usually add fields to the END of the channel struct without needing to
- * increment this.
- */
-#define VISOR_VHBA_CHANNEL_VERSIONID 2
-#define VISOR_VNIC_CHANNEL_VERSIONID 2
-
-/*
- * Everything necessary to handle SCSI & NIC traffic between Guest Partition and
- * IO Partition is defined below.
- */
-
-/*
- * Define the two queues per data channel between iopart and ioguestparts.
- * IOCHAN_TO_IOPART -- used by guest to 'insert' signals to iopart.
- * IOCHAN_FROM_IOPART -- used by guest to 'remove' signals from IO part.
- */
-#define IOCHAN_TO_IOPART 0
-#define IOCHAN_FROM_IOPART 1
-
-/* Size of cdb - i.e., SCSI cmnd */
-#define MAX_CMND_SIZE 16
-
-/* Unisys-specific DMA direction values */
-enum uis_dma_data_direction {
- UIS_DMA_BIDIRECTIONAL = 0,
- UIS_DMA_TO_DEVICE = 1,
- UIS_DMA_FROM_DEVICE = 2,
- UIS_DMA_NONE = 3
-};
-
-#define MAX_SENSE_SIZE 64
-#define MAX_PHYS_INFO 64
-
-/*
- * enum net_types - Various types of network packets that can be sent in cmdrsp.
- * @NET_RCV_POST: Submit buffer to hold receiving incoming packet.
- * @NET_RCV: visornic -> uisnic. Incoming packet received.
- * @NET_XMIT: uisnic -> visornic. For outgoing packet.
- * @NET_XMIT_DONE: visornic -> uisnic. Outgoing packet xmitted.
- * @NET_RCV_ENBDIS: uisnic -> visornic. Enable/Disable packet reception.
- * @NET_RCV_ENBDIS_ACK: visornic -> uisnic. Acknowledge enable/disable packet.
- * @NET_RCV_PROMISC: uisnic -> visornic. Enable/Disable promiscuous mode.
- * @NET_CONNECT_STATUS: visornic -> uisnic. Indicate the loss or restoration of
- * a network connection.
- * @NET_MACADDR: uisnic -> visornic. Indicates the client has requested
- * to update its MAC address.
- * @NET_MACADDR_ACK: MAC address acknowledge.
- */
-enum net_types {
- NET_RCV_POST = 0,
- NET_RCV,
- NET_XMIT,
- NET_XMIT_DONE,
- NET_RCV_ENBDIS,
- NET_RCV_ENBDIS_ACK,
- /* Reception */
- NET_RCV_PROMISC,
- NET_CONNECT_STATUS,
- NET_MACADDR,
- NET_MACADDR_ACK,
-};
-
-/* Minimum eth data size */
-#define ETH_MIN_DATA_SIZE 46
-#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-
-/* Maximum data size */
-#define VISOR_ETH_MAX_MTU 16384
-
-#ifndef MAX_MACADDR_LEN
-/* Number of bytes in MAC address */
-#define MAX_MACADDR_LEN 6
-#endif
-
-/* Various types of scsi task mgmt commands. */
-enum task_mgmt_types {
- TASK_MGMT_ABORT_TASK = 1,
- TASK_MGMT_BUS_RESET,
- TASK_MGMT_LUN_RESET,
- TASK_MGMT_TARGET_RESET,
-};
-
-/* Various types of vdisk mgmt commands. */
-enum vdisk_mgmt_types {
- VDISK_MGMT_ACQUIRE = 1,
- VDISK_MGMT_RELEASE,
-};
-
-struct phys_info {
- u64 pi_pfn;
- u16 pi_off;
- u16 pi_len;
-} __packed;
-
-#define MIN_NUMSIGNALS 64
-
-/* Structs with pragma pack. */
-
-struct guest_phys_info {
- u64 address;
- u64 length;
-} __packed;
-
-/*
- * struct uisscsi_dest
- * @channel: Bus number.
- * @id: Target number.
- * @lun: Logical unit number.
- */
-struct uisscsi_dest {
- u32 channel;
- u32 id;
- u32 lun;
-} __packed;
-
-struct vhba_wwnn {
- u32 wwnn1;
- u32 wwnn2;
-} __packed;
-
-/*
- * struct vhba_config_max
- * @max_channel: Maximum channel for devices attached to this bus.
- * @max_id: Maximum SCSI ID for devices attached to bus.
- * @max_lun: Maximum SCSI LUN for devices attached to bus.
- * @cmd_per_lun: Maximum number of outstanding commands per LUN.
- * @max_io_size: Maximum io size for devices attached to this bus. Max io size
- * is often determined by the resources of the hba,
- * e.g., max scatter-gather list length * page size / sector size.
- *
- * WARNING: Values stored in this structure must contain maximum counts (not
- * maximum values).
- *
- * 20 bytes
- */
-struct vhba_config_max {
- u32 max_channel;
- u32 max_id;
- u32 max_lun;
- u32 cmd_per_lun;
- u32 max_io_size;
-} __packed;
-
-/*
- * struct uiscmdrsp_scsi
- *
- * @handle: The handle to the cmd that was received. Send it back as
- * is in the rsp packet.
- * @cmnd: The cdb for the command.
- * @bufflen: Length of data to be transferred out or in.
- * @guest_phys_entries: Number of entries in scatter-gather list.
- * @struct gpi_list: Physical address information for each fragment.
- * @data_dir: Direction of the data, if any.
- * @struct vdest: Identifies the virtual hba, id, channel, lun to which
- * cmd was sent.
- * @linuxstat: Original Linux status used by Linux vdisk.
- * @scsistat: The scsi status.
- * @addlstat: Non-scsi status.
- * @sensebuf: Sense info in case cmd failed. sensebuf holds the
- * sense_data struct. See sense_data struct for more
- * details.
- * @*vdisk: Pointer to the vdisk to clean up when IO completes.
- * @no_disk_result: Used to return no disk inquiry result when
- * no_disk_result is set to 1
- * scsi.scsistat is SAM_STAT_GOOD
- * scsi.addlstat is 0
- * scsi.linuxstat is SAM_STAT_GOOD
- * That is, there is NO error.
- */
-struct uiscmdrsp_scsi {
- u64 handle;
- u8 cmnd[MAX_CMND_SIZE];
- u32 bufflen;
- u16 guest_phys_entries;
- struct guest_phys_info gpi_list[MAX_PHYS_INFO];
- u32 data_dir;
- struct uisscsi_dest vdest;
- /* Needed to queue the rsp back to cmd originator. */
- int linuxstat;
- u8 scsistat;
- u8 addlstat;
-#define ADDL_SEL_TIMEOUT 4
-	/* The following fields are needed to determine the result of the command. */
- u8 sensebuf[MAX_SENSE_SIZE];
- void *vdisk;
- int no_disk_result;
-} __packed;
-
-/*
- * Defines to support sending correct inquiry result when no disk is
- * configured.
- *
- * From SCSI SPC2 -
- *
- * If the target is not capable of supporting a device on this logical unit, the
- * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
- * and PERIPHERAL DEVICE TYPE set to 1Fh).
- *
- * The device server is capable of supporting the specified peripheral device
- * type on this logical unit. However, the physical device is not currently
- * connected to this logical unit.
- */
-
-/*
- * Peripheral qualifier of 0x3
- * Peripheral type of 0x1f
- * Specifies no device but target present
- */
-#define DEV_NOT_CAPABLE 0x7f
-/*
- * Peripheral qualifier of 0x1
- * Peripheral type of 0 - disk
- * Specifies device capable, but not present
- */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20
-/* HiSup = 1; shows support for report luns must be returned for lun 0. */
-#define DEV_HISUPPORT 0x10
-
-/*
- * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
- * and revision. Yikes! So let us always send back 36 bytes, the minimum for
- * inquiry result.
- */
-#define NO_DISK_INQUIRY_RESULT_LEN 36
-/* 5 bytes minimum for inquiry result */
-#define MIN_INQUIRY_RESULT_LEN 5
-
-/* SCSI device version for no disk inquiry result */
-/* indicates SCSI SPC2 (SPC3 is 5) */
-#define SCSI_SPC2_VER 4
-
-/* Struct and Defines to support sense information. */
-
-/*
- * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
- * initialized in exactly the manner that is recommended in Windows (hence the
- * odd values).
- * When set, these fields will have the following values:
- * ErrorCode = 0x70 indicates current error
- * Valid = 1 indicates sense info is valid
- * SenseKey contains sense key as defined by SCSI specs.
- * AdditionalSenseCode contains the additional sense code as defined by SCSI specs.
- * AdditionalSenseCodeQualifier contains qualifier to sense code as defined by
- * scsi docs.
- * AdditionalSenseLength will be sizeof(sense_data)-8 = 10.
- */
-struct sense_data {
- u8 errorcode:7;
- u8 valid:1;
- u8 segment_number;
- u8 sense_key:4;
- u8 reserved:1;
- u8 incorrect_length:1;
- u8 end_of_media:1;
- u8 file_mark:1;
- u8 information[4];
- u8 additional_sense_length;
- u8 command_specific_information[4];
- u8 additional_sense_code;
- u8 additional_sense_code_qualifier;
- u8 fru_code;
- u8 sense_key_specific[3];
-} __packed;
-
-/*
- * struct net_pkt_xmt
- * @len: Full length of data in the packet.
- * @num_frags: Number of fragments in frags containing data.
- * @struct phys_info frags: Physical page information.
- * @ethhdr: The ethernet header.
- * @struct lincsum: These are needed for csum at uisnic end.
- * @valid: 1 = struct is valid - else ignore.
- * @hrawoffv: 1 = hrawoff is valid.
- * @nhrawoffv: 1 = nhrawoff is valid.
- * @protocol: Specifies packet protocol.
- * @csum: Value used to set skb->csum at IOPart.
- * @hrawoff: Value used to set skb->h.raw at IOPart. hrawoff points to
- * the start of the TRANSPORT LAYER HEADER.
- * @nhrawoff: Value used to set skb->nh.raw at IOPart. nhrawoff points to
- * the start of the NETWORK LAYER HEADER.
- *
- * NOTE:
- * The full packet is described in frags but the ethernet header is separately
- * kept in ethhdr so that uisnic doesn't have to "MAP" the guest memory to get to
- * the header. uisnic needs ethhdr to determine how to route the packet.
- */
-struct net_pkt_xmt {
- int len;
- int num_frags;
- struct phys_info frags[MAX_PHYS_INFO];
- char ethhdr[ETH_HLEN];
- struct {
- u8 valid;
- u8 hrawoffv;
- u8 nhrawoffv;
- __be16 protocol;
- __wsum csum;
- u32 hrawoff;
- u32 nhrawoff;
- } lincsum;
-} __packed;
-
-struct net_pkt_xmtdone {
- /* Result of NET_XMIT */
- u32 xmt_done_result;
-} __packed;
-
-/*
- * RCVPOST_BUF_SIZE must be at most page_size (4096) - cache_line_size (64),
- * because dev_skb_alloc, which is used to generate RCV_POST skbs in visornic,
- * requires that there is "overhead" in the buffer, and pads 16 bytes. Use 1
- * full cache line of "overhead" so that transfers are optimized. IOVM requires
- * that a buffer be represented by 1 phys_info structure, which can only cover
- * page_size.
- */
-#define RCVPOST_BUF_SIZE 4032
-#define MAX_NET_RCV_CHAIN \
- ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
- / RCVPOST_BUF_SIZE)
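-
-/*
- * Worked example, for illustration (ETH_HLEN is the 14-byte Ethernet header):
- * MAX_NET_RCV_CHAIN is the ceiling of (16384 + 14) / 4032, i.e.
- * (16384 + 14 + 4032 - 1) / 4032 = 20429 / 4032 = 5 receive buffers are
- * needed to hold one maximum-sized frame.
- */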
-
-/* rcv buf size must be large enough to include ethernet data len + ethernet
- * header len - we are choosing 2K because it is guaranteed to be describable.
- */
-struct net_pkt_rcvpost {
- /* Physical page information for the single fragment 2K rcv buf */
- struct phys_info frag;
- /*
- * Ensures that receive posts are returned to the adapter which we sent
- * them from originally.
- */
- u64 unique_num;
-
-} __packed;
-
-/*
- * struct net_pkt_rcv
- * @rcv_done_len: Length of the received data.
- * @numrcvbufs: Contains the incoming data. Guest side MUST chain these
- * together.
- * @*rcvbuf: List of chained rcvbufs. Each entry is a receive buffer
- * provided by NET_RCV_POST. NOTE: First rcvbuf in the
- * chain will also be provided in net.buf.
- * @unique_num:
- * @rcvs_dropped_delta:
- *
- * The number of rcvbuf that can be chained is based on max mtu and size of each
- * rcvbuf.
- */
-struct net_pkt_rcv {
- u32 rcv_done_len;
- u8 numrcvbufs;
- void *rcvbuf[MAX_NET_RCV_CHAIN];
- u64 unique_num;
- u32 rcvs_dropped_delta;
-} __packed;
-
-struct net_pkt_enbdis {
- void *context;
- /* 1 = enable, 0 = disable */
- u16 enable;
-} __packed;
-
-struct net_pkt_macaddr {
- void *context;
- /* 6 bytes */
- u8 macaddr[MAX_MACADDR_LEN];
-} __packed;
-
-/*
- * struct uiscmdrsp_net - cmd rsp packet used for VNIC network traffic.
- * @enum type:
- * @*buf:
- * @union:
- * @struct xmt: Used for NET_XMIT.
- * @struct xmtdone: Used for NET_XMIT_DONE.
- * @struct rcvpost: Used for NET_RCV_POST.
- * @struct rcv: Used for NET_RCV.
- * @struct enbdis: Used for NET_RCV_ENBDIS, NET_RCV_ENBDIS_ACK,
- * NET_RCV_PROMISC, and NET_CONNECT_STATUS.
- * @struct macaddr:
- */
-struct uiscmdrsp_net {
- enum net_types type;
- void *buf;
- union {
- struct net_pkt_xmt xmt;
- struct net_pkt_xmtdone xmtdone;
- struct net_pkt_rcvpost rcvpost;
- struct net_pkt_rcv rcv;
- struct net_pkt_enbdis enbdis;
- struct net_pkt_macaddr macaddr;
- };
-} __packed;
-
-/*
- * struct uiscmdrsp_scsitaskmgmt
- * @enum tasktype: The type of task.
- * @struct vdest: The vdisk for which this task mgmt is generated.
- * @handle: This is a handle that the guest has saved off for its
- * own use. The handle value is preserved by iopart and
- * returned as is in the task mgmt rsp.
- * @notify_handle: For Linux guests, this is a pointer to wait_queue_head
- * that a thread is waiting on to see if the taskmgmt
- * command has completed. When the rsp is received by
- * guest, the thread receiving the response uses this to
- * notify the thread waiting for taskmgmt command
- * completion. Its value is preserved by iopart and
- * returned as is in the task mgmt rsp.
- * @notifyresult_handle: This is a handle to the location in the guest where
- * the result of the taskmgmt command (result field) is
- * saved to when the response is handled. Its value is
- * preserved by iopart and returned as is in the task mgmt
- * rsp.
- * @result: Result of taskmgmt command - set by IOPart.
- */
-struct uiscmdrsp_scsitaskmgmt {
- enum task_mgmt_types tasktype;
- struct uisscsi_dest vdest;
- u64 handle;
- u64 notify_handle;
- u64 notifyresult_handle;
- char result;
-
-#define TASK_MGMT_FAILED 0
-} __packed;
-
-/*
- * struct uiscmdrsp_disknotify - Used by uissd to send disk add/remove
- * notifications to Guest.
- * @add: 0-remove, 1-add.
- * @*v_hba: Channel info to route msg.
- * @channel: SCSI Path of Disk to be added or removed.
- * @id: SCSI Path of Disk to be added or removed.
- * @lun: SCSI Path of Disk to be added or removed.
- *
- * Note that the vHba pointer is not used by the Client/Guest side.
- */
-struct uiscmdrsp_disknotify {
- u8 add;
- void *v_hba;
- u32 channel, id, lun;
-} __packed;
-
-/* Keeping cmd and rsp info in one structure for now; cmd/rsp packet for SCSI. */
-struct uiscmdrsp {
- char cmdtype;
- /* Describes what type of information is in the struct */
-#define CMD_SCSI_TYPE 1
-#define CMD_NET_TYPE 2
-#define CMD_SCSITASKMGMT_TYPE 3
-#define CMD_NOTIFYGUEST_TYPE 4
- union {
- struct uiscmdrsp_scsi scsi;
- struct uiscmdrsp_net net;
- struct uiscmdrsp_scsitaskmgmt scsitaskmgmt;
- struct uiscmdrsp_disknotify disknotify;
- };
-	/* Send the response when the cmd is done (scsi and scsitaskmgmt). */
- void *private_data;
- /* General Purpose Queue Link */
- struct uiscmdrsp *next;
-	/* Pointer to the next active command */
- struct uiscmdrsp *activeQ_next;
-	/* Pointer to the previous active command */
- struct uiscmdrsp *activeQ_prev;
-} __packed;
-
-/* total = 28 bytes */
-struct iochannel_vhba {
- /* 8 bytes */
- struct vhba_wwnn wwnn;
- /* 20 bytes */
- struct vhba_config_max max;
-} __packed;
-
-struct iochannel_vnic {
- /* 6 bytes */
- u8 macaddr[6];
- /* 4 bytes */
- u32 num_rcv_bufs;
- /* 4 bytes */
- u32 mtu;
- /* 16 bytes */
- guid_t zone_guid;
-} __packed;
-
-/*
- * This is just the header of the IO channel. It is assumed that directly after
- * this header there is a large region of memory which contains the command and
- * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
- */
-struct visor_io_channel {
- struct channel_header channel_header;
- struct signal_queue_header cmd_q;
- struct signal_queue_header rsp_q;
- union {
- struct iochannel_vhba vhba;
- struct iochannel_vnic vnic;
- } __packed;
-
-#define MAX_CLIENTSTRING_LEN 1024
-	/* client_string is NULL terminated so holds max-1 bytes */
- u8 client_string[MAX_CLIENTSTRING_LEN];
-} __packed;
-
-/* INLINE functions for initializing and accessing I/O data channels. */
-#define SIZEOF_CMDRSP (64 * DIV_ROUND_UP(sizeof(struct uiscmdrsp), 64))
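-
-/*
- * For illustration: SIZEOF_CMDRSP rounds sizeof(struct uiscmdrsp) up to the
- * next multiple of 64. If, hypothetically, sizeof(struct uiscmdrsp) were
- * 2120 bytes, this would give 64 * DIV_ROUND_UP(2120, 64) = 64 * 34 = 2176.
- */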
-
-/* Use 4K page sizes when passing page info between Guest and IOPartition. */
-#define PI_PAGE_SIZE 0x1000
-#define PI_PAGE_MASK 0x0FFF
-
-/* __IOCHANNEL_H__ */
-#endif
diff --git a/drivers/staging/unisys/visorhba/Kconfig b/drivers/staging/unisys/visorhba/Kconfig
deleted file mode 100644
index ed59ac11c322..000000000000
--- a/drivers/staging/unisys/visorhba/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorhba configuration
-#
-
-config UNISYS_VISORHBA
- tristate "Unisys visorhba driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && SCSI
- help
- The Unisys visorhba driver provides support for s-Par HBA
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create a HBA device, the probe function of
- visorhba is called to create the scsi device.
- If you say Y here, you will enable the Unisys visorhba driver.
-
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
deleted file mode 100644
index b613a7dcdae9..000000000000
--- a/drivers/staging/unisys/visorhba/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys channel
-#
-
-obj-$(CONFIG_UNISYS_VISORHBA) += visorhba.o
-
-visorhba-y := visorhba_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
deleted file mode 100644
index 48aa18f8b984..000000000000
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ /dev/null
@@ -1,1142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/debugfs.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/visorbus.h>
-#include <linux/xarray.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "iochannel.h"
-
-/* The Send and Receive Buffers of the IO Queue may both be full */
-
-#define IOS_ERROR_THRESHOLD 1000
-#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
-#define VISORHBA_ERROR_COUNT 30
-
-static struct dentry *visorhba_debugfs_dir;
-
-/* GUIDS for HBA channel type supported by this driver */
-static struct visor_channeltype_descriptor visorhba_channel_types[] = {
- /* Note that the only channel type we expect to be reported by the
- * bus driver is the VISOR_VHBA channel.
- */
- { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
- VISOR_VHBA_CHANNEL_VERSIONID },
- {}
-};
-
-MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
-MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
-
-struct visordisk_info {
- struct scsi_device *sdev;
- u32 valid;
- atomic_t ios_threshold;
- atomic_t error_count;
- struct visordisk_info *next;
-};
-
-struct scsipending {
- struct uiscmdrsp cmdrsp;
- /* The Data being tracked */
- void *sent;
- /* Type of pointer that is being stored */
- char cmdtype;
-};
-
-/* Each scsi_host has a host_data area that contains this struct. */
-struct visorhba_devdata {
- struct Scsi_Host *scsihost;
- struct visor_device *dev;
- struct list_head dev_info_list;
- /* Tracks the requests that have been forwarded to
- * the IOVM and haven't returned yet
- */
- struct scsipending pending[MAX_PENDING_REQUESTS];
- /* Start search for next pending free slot here */
- unsigned int nextinsert;
- /* lock to protect data in devdata */
- spinlock_t privlock;
- bool serverdown;
- bool serverchangingstate;
- unsigned long long acquire_failed_cnt;
- unsigned long long interrupts_rcvd;
- unsigned long long interrupts_notme;
- unsigned long long interrupts_disabled;
- u64 __iomem *flags_addr;
- struct visordisk_info head;
- unsigned int max_buff_len;
- int devnum;
- struct uiscmdrsp *cmdrsp;
- /*
- * allows us to pass int handles back-and-forth between us and
- * iovm, instead of raw pointers
- */
- struct xarray xa;
- struct dentry *debugfs_dir;
- struct dentry *debugfs_info;
-};
-
-struct visorhba_devices_open {
- struct visorhba_devdata *devdata;
-};
-
-/*
- * add_scsipending_entry - Save off io command that is pending in
- * Service Partition
- * @devdata: Pointer to devdata
- * @cmdtype: Specifies the type of command pending
- * @new: The command to be saved
- *
- * Saves off the io command that is being handled by the Service
- * Partition so that it can be handled when it completes. If new is
- * NULL it is assumed the entry refers only to the cmdrsp.
- *
- * Return: Insert_location where entry was added on success,
- * -EBUSY if it can't
- */
-static int add_scsipending_entry(struct visorhba_devdata *devdata,
- char cmdtype, void *new)
-{
- unsigned long flags;
- struct scsipending *entry;
- int insert_location;
-
- spin_lock_irqsave(&devdata->privlock, flags);
- insert_location = devdata->nextinsert;
- while (devdata->pending[insert_location].sent) {
- insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
- if (insert_location == (int)devdata->nextinsert) {
- spin_unlock_irqrestore(&devdata->privlock, flags);
- return -EBUSY;
- }
- }
-
- entry = &devdata->pending[insert_location];
- memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
- entry->cmdtype = cmdtype;
- if (new)
- entry->sent = new;
- /* wants to send cmdrsp */
- else
- entry->sent = &entry->cmdrsp;
- devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- return insert_location;
-}
-
-/*
- * del_scsipending_ent - Removes an entry from the pending array
- * @devdata: Device holding the pending array
- * @del: Entry to remove
- *
- * Removes the entry pointed at by del and returns it.
- *
- * Return: The scsipending entry pointed to on success, NULL on failure
- */
-static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
-{
- unsigned long flags;
- void *sent;
-
- if (del >= MAX_PENDING_REQUESTS)
- return NULL;
-
- spin_lock_irqsave(&devdata->privlock, flags);
- sent = devdata->pending[del].sent;
- devdata->pending[del].cmdtype = 0;
- devdata->pending[del].sent = NULL;
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- return sent;
-}
-
-/*
- * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
- * @ddata: Device holding the pending array
- * @ent: Entry that stores the cmdrsp
- *
- * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
- * if the "sent" field is not NULL.
- *
- * Return: A pointer to the cmdrsp, NULL on failure
- */
-static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
- int ent)
-{
- if (ddata->pending[ent].sent)
- return &ddata->pending[ent].cmdrsp;
-
- return NULL;
-}
-
-/*
- * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
- * completion processing logic for a taskmgmt
- * cmd will be able to find who to wake up
- * and where to stash the result
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- * @event: The event handle to associate with an id
- * @result: The location to place the result of the event handle into
- */
-static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,
- wait_queue_head_t *event, int *result)
-{
- int ret;
- u32 id;
-
- /* specify the event that has to be triggered when this cmd is complete */
- ret = xa_alloc_irq(xa, &id, event, xa_limit_32b, GFP_KERNEL);
- if (ret)
- return ret;
- cmdrsp->scsitaskmgmt.notify_handle = id;
- ret = xa_alloc_irq(xa, &id, result, xa_limit_32b, GFP_KERNEL);
- if (ret) {
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
- return ret;
- }
- cmdrsp->scsitaskmgmt.notifyresult_handle = id;
-
- return 0;
-}
-
-/*
- * cleanup_scsitaskmgmt_handles - Forget handles created by
- * setup_scsitaskmgmt_handles()
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- */
-static void cleanup_scsitaskmgmt_handles(struct xarray *xa,
- struct uiscmdrsp *cmdrsp)
-{
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
-}
-
-/*
- * forward_taskmgmt_command - Send taskmgmt command to the Service
- * Partition
- * @tasktype: Type of taskmgmt command
- * @scsidev: Scsidev that issued command
- *
- * Create a cmdrsp packet and send it to the Service Partition
- * that will service this request.
- *
- * Return: Int representing whether command was queued successfully or not
- */
-static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
- struct scsi_device *scsidev)
-{
- struct uiscmdrsp *cmdrsp;
- struct visorhba_devdata *devdata =
- (struct visorhba_devdata *)scsidev->host->hostdata;
- int notifyresult = 0xffff;
- wait_queue_head_t notifyevent;
- int scsicmd_id;
- int ret;
-
- if (devdata->serverdown || devdata->serverchangingstate)
- return FAILED;
-
- scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
- NULL);
- if (scsicmd_id < 0)
- return FAILED;
-
- cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
-
- init_waitqueue_head(&notifyevent);
-
- /* issue TASK_MGMT_ABORT_TASK */
- cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
-
- ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,
- &notifyevent, &notifyresult);
- if (ret) {
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
- return FAILED;
- }
-
- /* save destination */
- cmdrsp->scsitaskmgmt.tasktype = tasktype;
- cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
- cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
- cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
- cmdrsp->scsitaskmgmt.handle = scsicmd_id;
-
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: initiating type=%d taskmgmt command\n", tasktype);
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
- goto err_del_scsipending_ent;
-
- /* It can take the Service Partition up to 35 seconds to complete
- * an IO in some cases, so wait 45 seconds and error out
- */
- if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
- msecs_to_jiffies(45000)))
- goto err_del_scsipending_ent;
-
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: taskmgmt type=%d success; result=0x%x\n",
- tasktype, notifyresult);
- cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
- return SUCCESS;
-
-err_del_scsipending_ent:
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: taskmgmt type=%d not executed\n", tasktype);
- del_scsipending_ent(devdata, scsicmd_id);
- cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
- return FAILED;
-}
-
-/*
- * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_ABORT_TASK */
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_ABORT << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_LUN_RESET */
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
- * target on the bus
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
-{
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- shost_for_each_device(scsidev, scsidev->host) {
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- }
- rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_host_reset_handler - Not supported
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: Not supported, return SUCCESS
- */
-static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
- return SUCCESS;
-}
-
-/*
- * visorhba_get_info - Get information about SCSI device
- * @shp: Scsi host that is requesting information
- *
- * Return: String with visorhba information
- */
-static const char *visorhba_get_info(struct Scsi_Host *shp)
-{
- /* Return version string */
- return "visorhba";
-}
-
-/*
- * dma_data_dir_linux_to_spar - convert dma_data_direction value to
- * Unisys-specific equivalent
- * @d: dma direction value to convert
- *
- * Returns the Unisys-specific dma direction value corresponding to @d
- */
-static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
-{
- switch (d) {
- case DMA_BIDIRECTIONAL:
- return UIS_DMA_BIDIRECTIONAL;
- case DMA_TO_DEVICE:
- return UIS_DMA_TO_DEVICE;
- case DMA_FROM_DEVICE:
- return UIS_DMA_FROM_DEVICE;
- case DMA_NONE:
- return UIS_DMA_NONE;
- default:
- return UIS_DMA_NONE;
- }
-}
-
-/*
- * visorhba_queue_command_lck - Queues command to the Service Partition
- * @scsicmd: Command to be queued
- * @visorhba_cmnd_done: Done command to call when scsicmd is returned
- *
- * Queues the scsicmd to the Service Partition after converting it to a
- * uiscmdrsp structure.
- *
- * Return: 0 if successfully queued to the Service Partition, otherwise
- * error code
- */
-static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd)
-{
- void (*visorhba_cmnd_done)(struct scsi_cmnd *) = scsi_done;
- struct uiscmdrsp *cmdrsp;
- struct scsi_device *scsidev = scsicmd->device;
- int insert_location;
- unsigned char *cdb = scsicmd->cmnd;
- struct Scsi_Host *scsihost = scsidev->host;
- unsigned int i;
- struct visorhba_devdata *devdata =
- (struct visorhba_devdata *)scsihost->hostdata;
- struct scatterlist *sg = NULL;
- struct scatterlist *sglist = NULL;
-
- if (devdata->serverdown || devdata->serverchangingstate)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
- (void *)scsicmd);
- if (insert_location < 0)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
- cmdrsp->cmdtype = CMD_SCSI_TYPE;
- /* save the pending insertion location. Deletion from pending
- * will return the scsicmd pointer for completion
- */
- cmdrsp->scsi.handle = insert_location;
-
- WARN_ON_ONCE(visorhba_cmnd_done != scsi_done);
- /* save destination */
- cmdrsp->scsi.vdest.channel = scsidev->channel;
- cmdrsp->scsi.vdest.id = scsidev->id;
- cmdrsp->scsi.vdest.lun = scsidev->lun;
- /* save datadir */
- cmdrsp->scsi.data_dir =
- dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
- memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
- cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
-
- /* keep track of the max buffer length so far. */
- if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
- devdata->max_buff_len = cmdrsp->scsi.bufflen;
-
- if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
- goto err_del_scsipending_ent;
-
- /* convert buffer to phys information */
- /* buffer is scatterlist - copy it out */
- sglist = scsi_sglist(scsicmd);
-
- for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
- cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
- cmdrsp->scsi.gpi_list[i].length = sg->length;
- }
- cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
-
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
- /* queue must be full and we aren't going to wait */
- goto err_del_scsipending_ent;
-
- return 0;
-
-err_del_scsipending_ent:
- del_scsipending_ent(devdata, insert_location);
- return SCSI_MLQUEUE_DEVICE_BUSY;
-}
-
-#ifdef DEF_SCSI_QCMD
-static DEF_SCSI_QCMD(visorhba_queue_command)
-#else
-#define visorhba_queue_command visorhba_queue_command_lck
-#endif
-
-/*
- * visorhba_slave_alloc - Called when new disk is discovered
- * @scsidev: New disk
- *
- * Create a new visordisk_info structure and add it to our
- * list of vdisks.
- *
- * Return: 0 on success, -ENOMEM on failure.
- */
-static int visorhba_slave_alloc(struct scsi_device *scsidev)
-{
- /* this is called by the midlayer before scan for new devices --
- * LLD can alloc any struct & do init if needed.
- */
- struct visordisk_info *vdisk;
- struct visorhba_devdata *devdata;
- struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
-
- /* already allocated return success */
- if (scsidev->hostdata)
- return 0;
-
- /* even though we errored, treat as success */
- devdata = (struct visorhba_devdata *)scsihost->hostdata;
- if (!devdata)
- return 0;
-
- vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
- if (!vdisk)
- return -ENOMEM;
-
- vdisk->sdev = scsidev;
- scsidev->hostdata = vdisk;
- return 0;
-}
-
-/*
- * visorhba_slave_destroy - Disk is going away, clean up resources.
- * @scsidev: Scsi device to destroy
- */
-static void visorhba_slave_destroy(struct scsi_device *scsidev)
-{
- /* midlevel calls this after device has been quiesced and
- * before it is to be deleted.
- */
- struct visordisk_info *vdisk;
-
- vdisk = scsidev->hostdata;
- scsidev->hostdata = NULL;
- kfree(vdisk);
-}
-
-static struct scsi_host_template visorhba_driver_template = {
- .name = "Unisys Visor HBA",
- .info = visorhba_get_info,
- .queuecommand = visorhba_queue_command,
- .eh_abort_handler = visorhba_abort_handler,
- .eh_device_reset_handler = visorhba_device_reset_handler,
- .eh_bus_reset_handler = visorhba_bus_reset_handler,
- .eh_host_reset_handler = visorhba_host_reset_handler,
-#define visorhba_MAX_CMNDS 128
- .can_queue = visorhba_MAX_CMNDS,
- .sg_tablesize = 64,
- .this_id = -1,
- .slave_alloc = visorhba_slave_alloc,
- .slave_destroy = visorhba_slave_destroy,
-};
-
-/*
- * info_debugfs_show - Debugfs interface to dump visorhba states
- * @seq: The sequence file to write information to
- * @v: Unused, but needed for use with seq file single_open invocation
- *
- * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
- *
- * Return: SUCCESS
- */
-static int info_debugfs_show(struct seq_file *seq, void *v)
-{
- struct visorhba_devdata *devdata = seq->private;
-
- seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
- seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
- seq_printf(seq, "interrupts_disabled = %llu\n",
- devdata->interrupts_disabled);
- seq_printf(seq, "interrupts_notme = %llu\n",
- devdata->interrupts_notme);
- seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
- if (devdata->flags_addr) {
- u64 phys_flags_addr =
- virt_to_phys((__force void *)devdata->flags_addr);
- seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
- phys_flags_addr);
- seq_printf(seq, "FeatureFlags = %llu\n",
- (u64)readq(devdata->flags_addr));
- }
- seq_printf(seq, "acquire_failed_cnt = %llu\n",
- devdata->acquire_failed_cnt);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(info_debugfs);
-
-/*
- * complete_taskmgmt_command - Complete task management
- * @xa:		The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- * @result: The result of the task management command
- *
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- */
-static void complete_taskmgmt_command(struct xarray *xa,
- struct uiscmdrsp *cmdrsp, int result)
-{
- wait_queue_head_t *wq =
- xa_load(xa, cmdrsp->scsitaskmgmt.notify_handle);
- int *scsi_result_ptr =
- xa_load(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
- if (unlikely(!(wq && scsi_result_ptr))) {
- pr_err("visorhba: no completion context; cmd will time out\n");
- return;
- }
-
- /* copy the result of the taskmgmt and
- * wake up the error handler that is waiting for this
- */
- pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
- *scsi_result_ptr = result;
- wake_up_all(wq);
-}
-
-/*
- * visorhba_serverdown_complete - Called when we are done cleaning up
- * from serverdown
- * @devdata: Visorhba instance on which to complete serverdown
- *
- * Called when we are done cleaning up from serverdown: stop processing the
- * queue and fail pending IOs.
- */
-static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
-{
- int i;
- struct scsipending *pendingdel = NULL;
- struct scsi_cmnd *scsicmd = NULL;
- struct uiscmdrsp *cmdrsp;
- unsigned long flags;
-
- /* Stop using the IOVM response queue (queue should be drained
- * by the end)
- */
- visorbus_disable_channel_interrupts(devdata->dev);
-
- /* Fail commands that weren't completed */
- spin_lock_irqsave(&devdata->privlock, flags);
- for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
- pendingdel = &devdata->pending[i];
- switch (pendingdel->cmdtype) {
- case CMD_SCSI_TYPE:
- scsicmd = pendingdel->sent;
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- break;
- case CMD_SCSITASKMGMT_TYPE:
- cmdrsp = pendingdel->sent;
- complete_taskmgmt_command(&devdata->xa, cmdrsp,
- TASK_MGMT_FAILED);
- break;
- default:
- break;
- }
- pendingdel->cmdtype = 0;
- pendingdel->sent = NULL;
- }
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- devdata->serverdown = true;
- devdata->serverchangingstate = false;
-}
-
-/*
- * visorhba_serverdown - Got notified that the IOVM is down
- * @devdata: Visorhba that is being serviced by downed IOVM
- *
- * Something happened to the IOVM, return immediately and
- * schedule cleanup work.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_serverdown(struct visorhba_devdata *devdata)
-{
- if (!devdata->serverdown && !devdata->serverchangingstate) {
- devdata->serverchangingstate = true;
- visorhba_serverdown_complete(devdata);
- } else if (devdata->serverchangingstate) {
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * do_scsi_linuxstat - Scsi command returned linuxstat
- * @cmdrsp: Response from IOVM
- * @scsicmd: Command issued
- *
- * Don't log errors for disk-not-present inquiries.
- */
-static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- struct visordisk_info *vdisk;
- struct scsi_device *scsidev;
-
- scsidev = scsicmd->device;
- memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
-
- /* Do not log errors for disk-not-present inquiries */
- if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
- (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
- cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
- return;
- /* Okay see what our error_count is here.... */
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
- atomic_inc(&vdisk->error_count);
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- }
-}
-
-static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
- bool is_lun0)
-{
- if (len < NO_DISK_INQUIRY_RESULT_LEN)
- return -EINVAL;
- memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
- buf[2] = SCSI_SPC2_VER;
- if (is_lun0) {
- buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
- buf[3] = DEV_HISUPPORT;
- } else {
- buf[0] = DEV_NOT_CAPABLE;
- }
- buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
- strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
- return 0;
-}
-
-/*
- * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
- * @cmdrsp: Response from IOVM
- * @scsicmd: Command issued
- *
- * Handle response when no linuxstat was returned.
- */
-static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- struct scsi_device *scsidev;
- unsigned char *buf;
- struct scatterlist *sg;
- unsigned int i;
- char *this_page;
- char *this_page_orig;
- int bufind = 0;
- struct visordisk_info *vdisk;
-
- scsidev = scsicmd->device;
- if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
- cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
- if (cmdrsp->scsi.no_disk_result == 0)
- return;
-
- buf = kzalloc(36, GFP_KERNEL);
- if (!buf)
- return;
-
- /* Linux scsi code wants a device at Lun 0
- * to issue report luns, but we don't want
- * a disk there so we'll present a processor
- * there.
- */
- set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
- scsidev->lun == 0);
-
- if (scsi_sg_count(scsicmd) == 0) {
- memcpy(scsi_sglist(scsicmd), buf,
- cmdrsp->scsi.bufflen);
- kfree(buf);
- return;
- }
-
- scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
- this_page_orig = kmap_atomic(sg_page(sg));
- this_page = (void *)((unsigned long)this_page_orig |
- sg->offset);
- memcpy(this_page, buf + bufind, sg->length);
- kunmap_atomic(this_page_orig);
- }
- kfree(buf);
- } else {
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->ios_threshold) > 0) {
- atomic_dec(&vdisk->ios_threshold);
- if (atomic_read(&vdisk->ios_threshold) == 0)
- atomic_set(&vdisk->error_count, 0);
- }
- }
-}
-
-/*
- * complete_scsi_command - Complete a scsi command
- * @uiscmdrsp: Response from Service Partition
- * @scsicmd: The scsi command
- *
- * Response was returned by the Service Partition. Finish it and send
- * completion to the scsi midlayer.
- */
-static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- /* take what we need out of cmdrsp and complete the scsicmd */
- scsicmd->result = cmdrsp->scsi.linuxstat;
- if (cmdrsp->scsi.linuxstat)
- do_scsi_linuxstat(cmdrsp, scsicmd);
- else
- do_scsi_nolinuxstat(cmdrsp, scsicmd);
-
- scsi_done(scsicmd);
-}
-
-/*
- * drain_queue - Pull responses out of iochannel
- * @cmdrsp: Response from the IOSP
- * @devdata: Device that owns this iochannel
- *
- * Pulls responses out of the iochannel and processes them.
- */
-static void drain_queue(struct uiscmdrsp *cmdrsp,
- struct visorhba_devdata *devdata)
-{
- struct scsi_cmnd *scsicmd;
-
- while (1) {
- /* queue empty */
- if (visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- break;
- if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
- /* scsicmd location is returned by the
- * deletion
- */
- scsicmd = del_scsipending_ent(devdata,
- cmdrsp->scsi.handle);
- if (!scsicmd)
- break;
- /* complete the orig cmd */
- complete_scsi_command(cmdrsp, scsicmd);
- } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
- if (!del_scsipending_ent(devdata,
- cmdrsp->scsitaskmgmt.handle))
- break;
- complete_taskmgmt_command(&devdata->xa, cmdrsp,
- cmdrsp->scsitaskmgmt.result);
- } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
- dev_err_once(&devdata->dev->device,
- "ignoring unsupported NOTIFYGUEST\n");
- /* cmdrsp is now available for re-use */
- }
-}
-
-/*
- * This is used only when this driver is active as an hba driver in the
- * client guest partition. It is called periodically so we can obtain
- * and process the command respond from the IO Service Partition periodically.
- */
-static void visorhba_channel_interrupt(struct visor_device *dev)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- drain_queue(devdata->cmdrsp, devdata);
-}
-
-/*
- * visorhba_pause - Function to handle visorbus pause messages
- * @dev: Device that is pausing
- * @complete_func: Function to call when finished
- *
- * Something has happened to the IO Service Partition that is
- * handling this device. Quiet this device and reset commands
- * so that the Service Partition can be corrected.
- *
- * Return: SUCCESS
- */
-static int visorhba_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
- visorhba_serverdown(devdata);
- complete_func(dev, 0);
- return 0;
-}
-
-/*
- * visorhba_resume - Function called when the IO Service Partition is back
- * @dev: Device that is resuming
- * @complete_func: Function to call when finished
- *
- * Yay! The IO Service Partition is back, the channel has been wiped
- * so let's re-establish the connection and start processing responses.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visorhba_devdata *devdata;
-
- devdata = dev_get_drvdata(&dev->device);
- if (!devdata)
- return -EINVAL;
-
- if (devdata->serverdown && !devdata->serverchangingstate)
- devdata->serverchangingstate = true;
-
- visorbus_enable_channel_interrupts(dev);
- devdata->serverdown = false;
- devdata->serverchangingstate = false;
-
- return 0;
-}
-
-/*
- * visorhba_probe - Device has been discovered; acquire it
- * @dev: visor_device that was discovered
- *
- * A new HBA was discovered; set up its initial connections.
- *
- * Return: 0 on success, otherwise error code
- */
-static int visorhba_probe(struct visor_device *dev)
-{
- struct Scsi_Host *scsihost;
- struct vhba_config_max max;
- struct visorhba_devdata *devdata = NULL;
- int err, channel_offset;
- u64 features;
-
- scsihost = scsi_host_alloc(&visorhba_driver_template,
- sizeof(*devdata));
- if (!scsihost)
- return -ENODEV;
-
- channel_offset = offsetof(struct visor_io_channel, vhba.max);
- err = visorbus_read_channel(dev, channel_offset, &max,
- sizeof(struct vhba_config_max));
- if (err < 0)
- goto err_scsi_host_put;
-
- scsihost->max_id = (unsigned int)max.max_id;
- scsihost->max_lun = (unsigned int)max.max_lun;
- scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
- scsihost->max_sectors =
- (unsigned short)(max.max_io_size >> 9);
- scsihost->sg_tablesize =
- (unsigned short)(max.max_io_size / PAGE_SIZE);
- if (scsihost->sg_tablesize > MAX_PHYS_INFO)
- scsihost->sg_tablesize = MAX_PHYS_INFO;
- err = scsi_add_host(scsihost, &dev->device);
- if (err < 0)
- goto err_scsi_host_put;
-
- devdata = (struct visorhba_devdata *)scsihost->hostdata;
- devdata->dev = dev;
- dev_set_drvdata(&dev->device, devdata);
-
- devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
- visorhba_debugfs_dir);
- if (!devdata->debugfs_dir) {
- err = -ENOMEM;
- goto err_scsi_remove_host;
- }
- devdata->debugfs_info =
- debugfs_create_file("info", 0440,
- devdata->debugfs_dir, devdata,
- &info_debugfs_fops);
- if (!devdata->debugfs_info) {
- err = -ENOMEM;
- goto err_debugfs_dir;
- }
-
- spin_lock_init(&devdata->privlock);
- devdata->serverdown = false;
- devdata->serverchangingstate = false;
- devdata->scsihost = scsihost;
-
- channel_offset = offsetof(struct visor_io_channel,
- channel_header.features);
- err = visorbus_read_channel(dev, channel_offset, &features, 8);
- if (err)
- goto err_debugfs_info;
- features |= VISOR_CHANNEL_IS_POLLING;
- err = visorbus_write_channel(dev, channel_offset, &features, 8);
- if (err)
- goto err_debugfs_info;
-
- xa_init(&devdata->xa);
-
- devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
- visorbus_enable_channel_interrupts(dev);
-
- scsi_scan_host(scsihost);
-
- return 0;
-
-err_debugfs_info:
- debugfs_remove(devdata->debugfs_info);
-
-err_debugfs_dir:
- debugfs_remove_recursive(devdata->debugfs_dir);
-
-err_scsi_remove_host:
- scsi_remove_host(scsihost);
-
-err_scsi_host_put:
- scsi_host_put(scsihost);
- return err;
-}
-
-/*
- * visorhba_remove - Remove a visorhba device
- * @dev: Device to remove
- *
- * Removes the visorhba device.
- */
-static void visorhba_remove(struct visor_device *dev)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
- struct Scsi_Host *scsihost = NULL;
-
- if (!devdata)
- return;
-
- scsihost = devdata->scsihost;
- kfree(devdata->cmdrsp);
- visorbus_disable_channel_interrupts(dev);
- scsi_remove_host(scsihost);
- scsi_host_put(scsihost);
-
- dev_set_drvdata(&dev->device, NULL);
- debugfs_remove(devdata->debugfs_info);
- debugfs_remove_recursive(devdata->debugfs_dir);
-}
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visorhba_driver = {
- .name = "visorhba",
- .owner = THIS_MODULE,
- .channel_types = visorhba_channel_types,
- .probe = visorhba_probe,
- .remove = visorhba_remove,
- .pause = visorhba_pause,
- .resume = visorhba_resume,
- .channel_interrupt = visorhba_channel_interrupt,
-};
-
-/*
- * visorhba_init - Driver init routine
- *
- * Initialize the visorhba driver and register it with visorbus
- * to handle s-Par virtual host bus adapter.
- *
- * Return: 0 on success, error code otherwise
- */
-static int visorhba_init(void)
-{
- int rc;
-
- visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
- if (!visorhba_debugfs_dir)
- return -ENOMEM;
-
- rc = visorbus_register_visor_driver(&visorhba_driver);
- if (rc)
- goto cleanup_debugfs;
-
- return 0;
-
-cleanup_debugfs:
- debugfs_remove_recursive(visorhba_debugfs_dir);
-
- return rc;
-}
-
-/*
- * visorhba_exit - Driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visorhba_exit(void)
-{
- visorbus_unregister_visor_driver(&visorhba_driver);
- debugfs_remove_recursive(visorhba_debugfs_dir);
-}
-
-module_init(visorhba_init);
-module_exit(visorhba_exit);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
deleted file mode 100644
index 5f036393aee9..000000000000
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorinput configuration
-#
-
-config UNISYS_VISORINPUT
- tristate "Unisys visorinput driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && INPUT
- help
- The Unisys s-Par visorinput driver provides a virtualized system
- console (keyboard and mouse) that is accessible through the
- s-Par firmware's user interface. s-Par provides video using the EFI
- GOP protocol, so if this driver is not present, the Linux guest should
- still boot with visible output in the partition desktop, but keyboard
- and mouse interaction will not be available.
-
diff --git a/drivers/staging/unisys/visorinput/Makefile b/drivers/staging/unisys/visorinput/Makefile
deleted file mode 100644
index 68ced7c8a65f..000000000000
--- a/drivers/staging/unisys/visorinput/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorinput
-#
-
-obj-$(CONFIG_UNISYS_VISORINPUT) += visorinput.o
-
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
deleted file mode 100644
index dffa71ac3cc5..000000000000
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ /dev/null
@@ -1,788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This driver lives in a generic guest Linux partition, and registers to
- * receive keyboard and mouse channels from the visorbus driver. It reads
- * inputs from such channels, and delivers them to the Linux OS in the
- * standard way Linux expects for input drivers.
- */
-
-#include <linux/fb.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* These defines identify mouse and keyboard activity that is specified by the
- * firmware to the host using the cmsimpleinput protocol.
- */
-/* only motion; arg1=x, arg2=y */
-#define INPUTACTION_XY_MOTION 1
-
-/* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_DOWN 2
-#define INPUTACTION_MOUSE_BUTTON_UP 3
-#define INPUTACTION_MOUSE_BUTTON_CLICK 4
-#define INPUTACTION_MOUSE_BUTTON_DCLICK 5
-
-/* arg1: wheel rotation away from/toward user */
-#define INPUTACTION_WHEEL_ROTATE_AWAY 6
-#define INPUTACTION_WHEEL_ROTATE_TOWARD 7
-
-/* arg1: scancode, as follows: If arg1 <= 0xff, it's a 1-byte scancode and arg1
- * is that scancode. If arg1 > 0xff, it's a 2-byte scancode, with the 1st
- * byte in the low 8 bits, and the 2nd byte in the high 8 bits.
- * E.g., the right ALT key would appear as x'38e0'.
- */
-#define INPUTACTION_KEY_DOWN 64
-#define INPUTACTION_KEY_UP 65
-#define INPUTACTION_KEY_DOWN_UP 67
-
-/* arg1: scancode (in same format as inputaction_keyDown); MUST refer to one of
- * the locking keys, like capslock, numlock, or scrolllock.
- * arg2: 1 iff locking key should be in the LOCKED position (e.g., light is ON)
- */
-#define INPUTACTION_SET_LOCKING_KEY_STATE 66
-
-/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
-#define VISOR_KEYBOARD_CHANNEL_GUID \
- GUID_INIT(0xc73416d0, 0xb0b8, 0x44af, \
- 0xb3, 0x4, 0x9d, 0x2a, 0xe9, 0x9f, 0x1b, 0x3d)
-#define VISOR_KEYBOARD_CHANNEL_GUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
-
-/* Mouse channel {addf07d4-94a9-46e2-81c3-61abcdbdbd87} */
-#define VISOR_MOUSE_CHANNEL_GUID \
- GUID_INIT(0xaddf07d4, 0x94a9, 0x46e2, \
- 0x81, 0xc3, 0x61, 0xab, 0xcd, 0xbd, 0xbd, 0x87)
-#define VISOR_MOUSE_CHANNEL_GUID_STR "addf07d4-94a9-46e2-81c3-61abcdbdbd87"
-
-#define PIXELS_ACROSS_DEFAULT 1024
-#define PIXELS_DOWN_DEFAULT 768
-#define KEYCODE_TABLE_BYTES 256
-
-struct visor_inputactivity {
- u16 action;
- u16 arg1;
- u16 arg2;
- u16 arg3;
-} __packed;
-
-struct visor_inputreport {
- u64 seq_no;
- struct visor_inputactivity activity;
-} __packed;
-
-/* header of keyboard/mouse channels */
-struct visor_input_channel_data {
- u32 n_input_reports;
- union {
- struct {
- u16 x_res;
- u16 y_res;
- } mouse;
- struct {
- u32 flags;
- } keyboard;
- };
-} __packed;
-
-enum visorinput_dev_type {
- visorinput_keyboard,
- visorinput_mouse,
-};
-
-/*
- * This is the private data that we store for each device. A pointer to this
- * struct is maintained via dev_get_drvdata() / dev_set_drvdata() for each
- * struct device.
- */
-struct visorinput_devdata {
- struct visor_device *dev;
- /* lock for dev */
- struct mutex lock_visor_dev;
- struct input_dev *visorinput_dev;
- bool paused;
- bool interrupts_enabled;
- /* size of following array */
- unsigned int keycode_table_bytes;
- /* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
- unsigned char keycode_table[];
-};
-
-static const guid_t visor_keyboard_channel_guid = VISOR_KEYBOARD_CHANNEL_GUID;
-static const guid_t visor_mouse_channel_guid = VISOR_MOUSE_CHANNEL_GUID;
-
-/*
- * Borrowed from drivers/input/keyboard/atakbd.c
- * This maps 1-byte scancodes to keycodes.
- */
-static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
- /* American layout */
- [0] = KEY_GRAVE,
- [1] = KEY_ESC,
- [2] = KEY_1,
- [3] = KEY_2,
- [4] = KEY_3,
- [5] = KEY_4,
- [6] = KEY_5,
- [7] = KEY_6,
- [8] = KEY_7,
- [9] = KEY_8,
- [10] = KEY_9,
- [11] = KEY_0,
- [12] = KEY_MINUS,
- [13] = KEY_EQUAL,
- [14] = KEY_BACKSPACE,
- [15] = KEY_TAB,
- [16] = KEY_Q,
- [17] = KEY_W,
- [18] = KEY_E,
- [19] = KEY_R,
- [20] = KEY_T,
- [21] = KEY_Y,
- [22] = KEY_U,
- [23] = KEY_I,
- [24] = KEY_O,
- [25] = KEY_P,
- [26] = KEY_LEFTBRACE,
- [27] = KEY_RIGHTBRACE,
- [28] = KEY_ENTER,
- [29] = KEY_LEFTCTRL,
- [30] = KEY_A,
- [31] = KEY_S,
- [32] = KEY_D,
- [33] = KEY_F,
- [34] = KEY_G,
- [35] = KEY_H,
- [36] = KEY_J,
- [37] = KEY_K,
- [38] = KEY_L,
- [39] = KEY_SEMICOLON,
- [40] = KEY_APOSTROPHE,
- [41] = KEY_GRAVE,
- [42] = KEY_LEFTSHIFT,
- [43] = KEY_BACKSLASH,
- [44] = KEY_Z,
- [45] = KEY_X,
- [46] = KEY_C,
- [47] = KEY_V,
- [48] = KEY_B,
- [49] = KEY_N,
- [50] = KEY_M,
- [51] = KEY_COMMA,
- [52] = KEY_DOT,
- [53] = KEY_SLASH,
- [54] = KEY_RIGHTSHIFT,
- [55] = KEY_KPASTERISK,
- [56] = KEY_LEFTALT,
- [57] = KEY_SPACE,
- [58] = KEY_CAPSLOCK,
- [59] = KEY_F1,
- [60] = KEY_F2,
- [61] = KEY_F3,
- [62] = KEY_F4,
- [63] = KEY_F5,
- [64] = KEY_F6,
- [65] = KEY_F7,
- [66] = KEY_F8,
- [67] = KEY_F9,
- [68] = KEY_F10,
- [69] = KEY_NUMLOCK,
- [70] = KEY_SCROLLLOCK,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
- [74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
- [78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- /* enables UK backslash+pipe key and FR lessthan+greaterthan key */
- [86] = KEY_102ND,
- [87] = KEY_F11,
- [88] = KEY_F12,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK,
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
- [96] = KEY_KPENTER,
- [97] = KEY_RIGHTCTRL,
- [98] = KEY_KPSLASH,
- [99] = KEY_KPLEFTPAREN,
- [100] = KEY_KPRIGHTPAREN,
- [101] = KEY_KPSLASH,
- [102] = KEY_HOME,
- [103] = KEY_UP,
- [104] = KEY_PAGEUP,
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_END,
- [108] = KEY_DOWN,
- [109] = KEY_PAGEDOWN,
- [110] = KEY_INSERT,
- [111] = KEY_DELETE,
- [112] = KEY_MACRO,
- [113] = KEY_MUTE
-};
-
-/*
- * This maps the <xx> in extended scancodes of the form "0xE0 <xx>" into
- * keycodes.
- */
-static const unsigned char visorkbd_ext_keycode[KEYCODE_TABLE_BYTES] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10 */
- 0, 0, 0, 0, KEY_KPENTER, KEY_RIGHTCTRL, 0, 0, /* 0x18 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20 */
- KEY_RIGHTALT, 0, 0, 0, 0, 0, 0, 0, /* 0x28 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x30 */
- KEY_RIGHTALT /* AltGr */, 0, 0, 0, 0, 0, 0, 0, /* 0x38 */
- 0, 0, 0, 0, 0, 0, 0, KEY_HOME, /* 0x40 */
- KEY_UP, KEY_PAGEUP, 0, KEY_LEFT, 0, KEY_RIGHT, 0, KEY_END, /* 0x48 */
- KEY_DOWN, KEY_PAGEDOWN, KEY_INSERT, KEY_DELETE, 0, 0, 0, 0, /* 0x50 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x58 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x70 */
-};
-
-static int visorinput_open(struct input_dev *visorinput_dev)
-{
- struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
- if (!devdata) {
- dev_err(&visorinput_dev->dev,
- "%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
- return -EINVAL;
- }
- dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__);
-
- /*
- * If we're not paused, really enable interrupts. Regardless of whether
- * we are paused, set a flag indicating interrupts should be enabled so
- * when we resume, interrupts will really be enabled.
- */
- mutex_lock(&devdata->lock_visor_dev);
- devdata->interrupts_enabled = true;
- if (devdata->paused)
- goto out_unlock;
- visorbus_enable_channel_interrupts(devdata->dev);
-
-out_unlock:
- mutex_unlock(&devdata->lock_visor_dev);
- return 0;
-}
-
-static void visorinput_close(struct input_dev *visorinput_dev)
-{
- struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
- if (!devdata) {
- dev_err(&visorinput_dev->dev,
- "%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
- return;
- }
- dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__);
-
- /*
- * If we're not paused, really disable interrupts. Regardless of
- * whether we are paused, set a flag indicating interrupts should be
- * disabled so when we resume we will not re-enable them.
- */
- mutex_lock(&devdata->lock_visor_dev);
- devdata->interrupts_enabled = false;
- if (devdata->paused)
- goto out_unlock;
- visorbus_disable_channel_interrupts(devdata->dev);
-
-out_unlock:
- mutex_unlock(&devdata->lock_visor_dev);
-}
-
-/*
- * setup_client_keyboard() initializes and returns a Linux input node that we
- * can use to deliver keyboard inputs to Linux. We of course do this when we
- * see keyboard inputs coming in on a keyboard channel.
- */
-static struct input_dev *setup_client_keyboard(void *devdata,
- unsigned char *keycode_table)
-
-{
- int i;
- struct input_dev *visorinput_dev = input_allocate_device();
-
- if (!visorinput_dev)
- return NULL;
-
- visorinput_dev->name = "visor Keyboard";
- visorinput_dev->phys = "visorkbd:input0";
- visorinput_dev->id.bustype = BUS_VIRTUAL;
- visorinput_dev->id.vendor = 0x0001;
- visorinput_dev->id.product = 0x0001;
- visorinput_dev->id.version = 0x0100;
-
- visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) |
- BIT_MASK(EV_REP) |
- BIT_MASK(EV_LED);
- visorinput_dev->ledbit[0] = BIT_MASK(LED_CAPSL) |
- BIT_MASK(LED_SCROLLL) |
- BIT_MASK(LED_NUML);
- visorinput_dev->keycode = keycode_table;
- /* sizeof(unsigned char) */
- visorinput_dev->keycodesize = 1;
- visorinput_dev->keycodemax = KEYCODE_TABLE_BYTES;
-
- for (i = 1; i < visorinput_dev->keycodemax; i++)
- set_bit(keycode_table[i], visorinput_dev->keybit);
- for (i = 1; i < visorinput_dev->keycodemax; i++)
- set_bit(keycode_table[i + KEYCODE_TABLE_BYTES],
- visorinput_dev->keybit);
-
- visorinput_dev->open = visorinput_open;
- visorinput_dev->close = visorinput_close;
- /* pre input_register! */
- input_set_drvdata(visorinput_dev, devdata);
-
- return visorinput_dev;
-}
-
-static struct input_dev *setup_client_mouse(void *devdata, unsigned int xres,
- unsigned int yres)
-{
- struct input_dev *visorinput_dev = input_allocate_device();
-
- if (!visorinput_dev)
- return NULL;
-
- visorinput_dev->name = "visor Mouse";
- visorinput_dev->phys = "visormou:input0";
- visorinput_dev->id.bustype = BUS_VIRTUAL;
- visorinput_dev->id.vendor = 0x0001;
- visorinput_dev->id.product = 0x0002;
- visorinput_dev->id.version = 0x0100;
-
- visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- set_bit(BTN_LEFT, visorinput_dev->keybit);
- set_bit(BTN_RIGHT, visorinput_dev->keybit);
- set_bit(BTN_MIDDLE, visorinput_dev->keybit);
-
- if (xres == 0)
- xres = PIXELS_ACROSS_DEFAULT;
- if (yres == 0)
- yres = PIXELS_DOWN_DEFAULT;
- input_set_abs_params(visorinput_dev, ABS_X, 0, xres, 0, 0);
- input_set_abs_params(visorinput_dev, ABS_Y, 0, yres, 0, 0);
-
- visorinput_dev->open = visorinput_open;
- visorinput_dev->close = visorinput_close;
- /* pre input_register! */
- input_set_drvdata(visorinput_dev, devdata);
- input_set_capability(visorinput_dev, EV_REL, REL_WHEEL);
-
- return visorinput_dev;
-}
-
-static struct visorinput_devdata *devdata_create(struct visor_device *dev,
- enum visorinput_dev_type dtype)
-{
- struct visorinput_devdata *devdata = NULL;
- unsigned int extra_bytes = 0;
- unsigned int size, xres, yres, err;
- struct visor_input_channel_data data;
-
- if (dtype == visorinput_keyboard)
- /* allocate room for devdata->keycode_table, filled in below */
- extra_bytes = KEYCODE_TABLE_BYTES * 2;
- devdata = kzalloc(struct_size(devdata, keycode_table, extra_bytes),
- GFP_KERNEL);
- if (!devdata)
- return NULL;
- mutex_init(&devdata->lock_visor_dev);
- mutex_lock(&devdata->lock_visor_dev);
- devdata->dev = dev;
-
- /*
- * visorinput_open() can be called as soon as input_register_device()
- * happens, and that will enable channel interrupts. Setting paused
- * prevents us from getting into visorinput_channel_interrupt() prior
- * to the device structure being totally initialized.
- */
- devdata->paused = true;
-
- /*
- * This is an input device in a client guest partition, so we need to
- * create whatever input nodes are necessary to deliver our inputs to
- * the guest OS.
- */
- switch (dtype) {
- case visorinput_keyboard:
- devdata->keycode_table_bytes = extra_bytes;
- memcpy(devdata->keycode_table, visorkbd_keycode,
- KEYCODE_TABLE_BYTES);
- memcpy(devdata->keycode_table + KEYCODE_TABLE_BYTES,
- visorkbd_ext_keycode, KEYCODE_TABLE_BYTES);
- devdata->visorinput_dev = setup_client_keyboard
- (devdata, devdata->keycode_table);
- if (!devdata->visorinput_dev)
- goto cleanups_register;
- break;
- case visorinput_mouse:
- size = sizeof(struct visor_input_channel_data);
- err = visorbus_read_channel(dev, sizeof(struct channel_header),
- &data, size);
- if (err)
- goto cleanups_register;
- xres = data.mouse.x_res;
- yres = data.mouse.y_res;
- devdata->visorinput_dev = setup_client_mouse(devdata, xres,
- yres);
- if (!devdata->visorinput_dev)
- goto cleanups_register;
- break;
- default:
- /* No other input devices supported */
- break;
- }
-
- dev_set_drvdata(&dev->device, devdata);
- mutex_unlock(&devdata->lock_visor_dev);
-
- /*
- * Device struct is completely set up now, with the exception of
- * visorinput_dev being registered. We need to unlock before we
- * register the device, because this can cause an on-stack call of
- * visorinput_open(), which would deadlock if we had the lock.
- */
- if (input_register_device(devdata->visorinput_dev)) {
- input_free_device(devdata->visorinput_dev);
- goto err_kfree_devdata;
- }
-
- mutex_lock(&devdata->lock_visor_dev);
- /*
- * Establish calls to visorinput_channel_interrupt() if that is the
- * desired state that we've kept track of in interrupts_enabled while
- * the device was being created.
- */
- devdata->paused = false;
- if (devdata->interrupts_enabled)
- visorbus_enable_channel_interrupts(dev);
- mutex_unlock(&devdata->lock_visor_dev);
-
- return devdata;
-
-cleanups_register:
- mutex_unlock(&devdata->lock_visor_dev);
-err_kfree_devdata:
- kfree(devdata);
- return NULL;
-}
-
-static int visorinput_probe(struct visor_device *dev)
-{
- const guid_t *guid;
- enum visorinput_dev_type dtype;
-
- guid = visorchannel_get_guid(dev->visorchannel);
- if (guid_equal(guid, &visor_mouse_channel_guid))
- dtype = visorinput_mouse;
- else if (guid_equal(guid, &visor_keyboard_channel_guid))
- dtype = visorinput_keyboard;
- else
- return -ENODEV;
- visorbus_disable_channel_interrupts(dev);
- if (!devdata_create(dev, dtype))
- return -ENOMEM;
- return 0;
-}
-
-static void unregister_client_input(struct input_dev *visorinput_dev)
-{
- if (visorinput_dev)
- input_unregister_device(visorinput_dev);
-}
-
-static void visorinput_remove(struct visor_device *dev)
-{
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- mutex_lock(&devdata->lock_visor_dev);
- visorbus_disable_channel_interrupts(dev);
-
- /*
- * due to above, at this time no thread of execution will be in
- * visorinput_channel_interrupt()
- */
-
- dev_set_drvdata(&dev->device, NULL);
- mutex_unlock(&devdata->lock_visor_dev);
-
- unregister_client_input(devdata->visorinput_dev);
- kfree(devdata);
-}
-
-/*
- * Set the locking state of the locking key indicated by <keycode> to the
- * state given by <desired_state> (1=locked, 0=unlocked).
- */
-static void handle_locking_key(struct input_dev *visorinput_dev, int keycode,
- int desired_state)
-{
- int led;
-
- switch (keycode) {
- case KEY_CAPSLOCK:
- led = LED_CAPSL;
- break;
- case KEY_SCROLLLOCK:
- led = LED_SCROLLL;
- break;
- case KEY_NUMLOCK:
- led = LED_NUML;
- break;
- default:
- return;
- }
- if (test_bit(led, visorinput_dev->led) != desired_state) {
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- __change_bit(led, visorinput_dev->led);
- }
-}
-
-/*
- * <scancode> is either a 1-byte scancode, or an extended 16-bit scancode with
- * 0xE0 in the low byte and the extended scancode value in the next higher byte.
- */
-static int scancode_to_keycode(int scancode)
-{
- if (scancode > 0xff)
- return visorkbd_ext_keycode[(scancode >> 8) & 0xff];
-
- return visorkbd_keycode[scancode];
-}
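As a minimal stand-alone sketch of the decoding described above (illustrative only, not part of the removed driver), the right ALT key arrives as the extended scancode 0x38e0; the low byte 0xe0 marks it as extended and the high byte 0x38 indexes the extended table:

#include <stdio.h>

/* Mirror of the scancode_to_keycode() split: >0xff means the "0xE0 <xx>" form. */
static int demo_table_index(int scancode, int *extended)
{
	if (scancode > 0xff) {
		*extended = 1;
		return (scancode >> 8) & 0xff;	/* index into visorkbd_ext_keycode[] */
	}
	*extended = 0;
	return scancode;			/* index into visorkbd_keycode[] */
}

int main(void)
{
	int ext;
	int idx = demo_table_index(0x38e0, &ext);	/* right ALT */

	printf("%s table, index 0x%02x\n", ext ? "extended" : "base", idx);
	return 0;
}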
-
-static int calc_button(int x)
-{
- switch (x) {
- case 1:
- return BTN_LEFT;
- case 2:
- return BTN_MIDDLE;
- case 3:
- return BTN_RIGHT;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * This is used only when this driver is active as an input driver in the
- * client guest partition. It is called periodically so we can obtain inputs
- * from the channel, and deliver them to the guest OS.
- */
-static void visorinput_channel_interrupt(struct visor_device *dev)
-{
- struct visor_inputreport r;
- int scancode, keycode;
- struct input_dev *visorinput_dev;
- int xmotion, ymotion, button;
- int i;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- visorinput_dev = devdata->visorinput_dev;
-
- while (!visorchannel_signalremove(dev->visorchannel, 0, &r)) {
- scancode = r.activity.arg1;
- keycode = scancode_to_keycode(scancode);
- switch (r.activity.action) {
- case INPUTACTION_KEY_DOWN:
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_KEY_UP:
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_KEY_DOWN_UP:
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_SET_LOCKING_KEY_STATE:
- handle_locking_key(visorinput_dev, keycode,
- r.activity.arg2);
- break;
- case INPUTACTION_XY_MOTION:
- xmotion = r.activity.arg1;
- ymotion = r.activity.arg2;
- input_report_abs(visorinput_dev, ABS_X, xmotion);
- input_report_abs(visorinput_dev, ABS_Y, ymotion);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_DOWN:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_UP:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_CLICK:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_DCLICK:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- for (i = 0; i < 2; i++) {
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- }
- break;
- case INPUTACTION_WHEEL_ROTATE_AWAY:
- input_report_rel(visorinput_dev, REL_WHEEL, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_WHEEL_ROTATE_TOWARD:
- input_report_rel(visorinput_dev, REL_WHEEL, -1);
- input_sync(visorinput_dev);
- break;
- default:
- /* Unsupported input action */
- break;
- }
- }
-}
-
-static int visorinput_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- int rc;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata) {
- rc = -ENODEV;
- goto out;
- }
-
- mutex_lock(&devdata->lock_visor_dev);
- if (devdata->paused) {
- rc = -EBUSY;
- goto out_locked;
- }
- if (devdata->interrupts_enabled)
- visorbus_disable_channel_interrupts(dev);
-
- /*
- * due to above, at this time no thread of execution will be in
- * visorinput_channel_interrupt()
- */
- devdata->paused = true;
- complete_func(dev, 0);
- rc = 0;
-out_locked:
- mutex_unlock(&devdata->lock_visor_dev);
-out:
- return rc;
-}
-
-static int visorinput_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- int rc;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata) {
- rc = -ENODEV;
- goto out;
- }
- mutex_lock(&devdata->lock_visor_dev);
- if (!devdata->paused) {
- rc = -EBUSY;
- goto out_locked;
- }
- devdata->paused = false;
- complete_func(dev, 0);
-
- /*
- * Re-establish calls to visorinput_channel_interrupt() if that is the
- * desired state that we've kept track of in interrupts_enabled while
- * the device was paused.
- */
- if (devdata->interrupts_enabled)
- visorbus_enable_channel_interrupts(dev);
-
- rc = 0;
-out_locked:
- mutex_unlock(&devdata->lock_visor_dev);
-out:
- return rc;
-}
-
-/* GUIDS for all channel types supported by this driver. */
-static struct visor_channeltype_descriptor visorinput_channel_types[] = {
- { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard",
- sizeof(struct channel_header), 0 },
- { VISOR_MOUSE_CHANNEL_GUID, "mouse", sizeof(struct channel_header), 0 },
- {}
-};
-
-static struct visor_driver visorinput_driver = {
- .name = "visorinput",
- .owner = THIS_MODULE,
- .channel_types = visorinput_channel_types,
- .probe = visorinput_probe,
- .remove = visorinput_remove,
- .channel_interrupt = visorinput_channel_interrupt,
- .pause = visorinput_pause,
- .resume = visorinput_resume,
-};
-
-module_driver(visorinput_driver, visorbus_register_visor_driver,
- visorbus_unregister_visor_driver);
-
-MODULE_DEVICE_TABLE(visorbus, visorinput_channel_types);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par human input driver for virtual keyboard/mouse");
-
-MODULE_ALIAS("visorbus:" VISOR_MOUSE_CHANNEL_GUID_STR);
-MODULE_ALIAS("visorbus:" VISOR_KEYBOARD_CHANNEL_GUID_STR);
diff --git a/drivers/staging/unisys/visornic/Kconfig b/drivers/staging/unisys/visornic/Kconfig
deleted file mode 100644
index 3f8f5570821b..000000000000
--- a/drivers/staging/unisys/visornic/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visornic configuration
-#
-
-config UNISYS_VISORNIC
- tristate "Unisys visornic driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && NET
- help
- The Unisys Visornic driver provides support for s-Par network
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create a network device, the probe function of
- visornic is called to create the netdev device. Networking on
- s-Par switches will not work if this driver is not selected.
- If you say Y here, you will enable the Unisys visornic driver.
-
diff --git a/drivers/staging/unisys/visornic/Makefile b/drivers/staging/unisys/visornic/Makefile
deleted file mode 100644
index f2984880c340..000000000000
--- a/drivers/staging/unisys/visornic/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visornic
-#
-
-obj-$(CONFIG_UNISYS_VISORNIC) += visornic.o
-
-visornic-y := visornic_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
deleted file mode 100644
index 643432458105..000000000000
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ /dev/null
@@ -1,2148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/* This driver lives in an s-Par guest partition and registers for ethernet IO
- * channels from the visorbus driver. It creates netdev devices, forwards
- * transmits to the IO Partition via the IO channel, and accepts receives from
- * the IO Partition via the same channel.
- */
-
-#include <linux/debugfs.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/kthread.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/visorbus.h>
-
-#include "iochannel.h"
-
-#define VISORNIC_INFINITE_RSP_WAIT 0
-
-/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
- * = 163840 bytes
- */
-#define MAX_BUF 163840
-#define NAPI_WEIGHT 64
-
-/* GUIDS for director channel type supported by this driver. */
-/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
-#define VISOR_VNIC_CHANNEL_GUID \
- GUID_INIT(0x8cd5994d, 0xc58e, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VNIC_CHANNEL_GUID_STR \
- "8cd5994d-c58e-11da-95a9-00e08161165f"
-
-static struct visor_channeltype_descriptor visornic_channel_types[] = {
- /* Note that the only channel type we expect to be reported by the
- * bus driver is the VISOR_VNIC channel.
- */
- { VISOR_VNIC_CHANNEL_GUID, "ultravnic", sizeof(struct channel_header),
- VISOR_VNIC_CHANNEL_VERSIONID },
- {}
-};
-MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
-/* FIXME XXX: This next line of code must be fixed and removed before
- * acceptance into the 'normal' part of the kernel. It is only here as a place
- * holder to get module autoloading functionality working for visorbus. Code
- * must be added to scripts/mod/file2alias.c, etc., to get this working
- * properly.
- */
-MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_GUID_STR);
-
-struct chanstat {
- unsigned long got_rcv;
- unsigned long got_enbdisack;
- unsigned long got_xmit_done;
- unsigned long xmit_fail;
- unsigned long sent_enbdis;
- unsigned long sent_promisc;
- unsigned long sent_post;
- unsigned long sent_post_failed;
- unsigned long sent_xmit;
- unsigned long reject_count;
- unsigned long extra_rcvbufs_sent;
-};
-
-/* struct visornic_devdata
- * @enabled: 0 disabled 1 enabled to receive.
- * @enab_dis_acked: NET_RCV_ENABLE/DISABLE acked by IOPART.
- * @struct *dev:
- * @struct *netdev:
- * @struct net_stats:
- * @interrupt_rcvd:
- * @rsp_queue:
- * @struct **rcvbuf:
- * @incarnation_id: incarnation_id lets IOPART know about
- * re-birth.
- * @old_flags: flags as they were prior to
- * set_multicast_list.
- * @usage: count of users.
- * @num_rcv_bufs: number of rcv buffers the vnic will post.
- * @num_rcv_bufs_could_not_alloc:
- * @num_rcvbuf_in_iovm:
- * @alloc_failed_in_if_needed_cnt:
- * @alloc_failed_in_repost_rtn_cnt:
- * @max_outstanding_net_xmits: absolute max number of outstanding xmits
- * - should never hit this.
- * @upper_threshold_net_xmits: high water mark for calling
- * netif_stop_queue().
- * @lower_threshold_net_xmits: low water mark for calling
- * netif_wake_queue().
- * @struct xmitbufhead: xmitbufhead - head of the xmit buffer list
- * sent to the IOPART end.
- * @server_down_complete_func:
- * @struct timeout_reset:
- * @struct *cmdrsp_rcv: cmdrsp_rcv is used for posting/unposting rcv
- * buffers.
- * @struct *xmit_cmdrsp: xmit_cmdrsp - issues NET_XMIT - only one
- * active xmit at a time.
- * @server_down: IOPART is down.
- * @server_change_state: Processing SERVER_CHANGESTATE msg.
- * @going_away: device is being torn down.
- * @struct *eth_debugfs_dir:
- * @interrupts_rcvd:
- * @interrupts_notme:
- * @interrupts_disabled:
- * @busy_cnt:
- * @priv_lock: spinlock to access devdata structures.
- * @flow_control_upper_hits:
- * @flow_control_lower_hits:
- * @n_rcv0: # rcvs of 0 buffers.
- * @n_rcv1: # rcvs of 1 buffers.
- * @n_rcv2: # rcvs of 2 buffers.
- * @n_rcvx: # rcvs of >2 buffers.
- * @found_repost_rcvbuf_cnt: # repost_rcvbuf_cnt.
- * @repost_found_skb_cnt: # of found the skb.
- * @n_repost_deficit: # of lost rcv buffers.
- * @bad_rcv_buf: # of unknown rcv skb not freed.
- * @n_rcv_packets_not_accepted: # bogus rcv packets.
- * @queuefullmsg_logged:
- * @struct chstat:
- * @struct napi:
- * @struct cmdrsp:
- */
-struct visornic_devdata {
- unsigned short enabled;
- unsigned short enab_dis_acked;
-
- struct visor_device *dev;
- struct net_device *netdev;
- struct net_device_stats net_stats;
- atomic_t interrupt_rcvd;
- wait_queue_head_t rsp_queue;
- struct sk_buff **rcvbuf;
- u64 incarnation_id;
- unsigned short old_flags;
- atomic_t usage;
-
- int num_rcv_bufs;
- int num_rcv_bufs_could_not_alloc;
- atomic_t num_rcvbuf_in_iovm;
- unsigned long alloc_failed_in_if_needed_cnt;
- unsigned long alloc_failed_in_repost_rtn_cnt;
-
- unsigned long max_outstanding_net_xmits;
- unsigned long upper_threshold_net_xmits;
- unsigned long lower_threshold_net_xmits;
- struct sk_buff_head xmitbufhead;
-
- visorbus_state_complete_func server_down_complete_func;
- struct work_struct timeout_reset;
- struct uiscmdrsp *cmdrsp_rcv;
- struct uiscmdrsp *xmit_cmdrsp;
- bool server_down;
- bool server_change_state;
- bool going_away;
- struct dentry *eth_debugfs_dir;
- u64 interrupts_rcvd;
- u64 interrupts_notme;
- u64 interrupts_disabled;
- u64 busy_cnt;
- /* spinlock to access devdata structures. */
- spinlock_t priv_lock;
-
- /* flow control counter */
- u64 flow_control_upper_hits;
- u64 flow_control_lower_hits;
-
- /* debug counters */
- unsigned long n_rcv0;
- unsigned long n_rcv1;
- unsigned long n_rcv2;
- unsigned long n_rcvx;
- unsigned long found_repost_rcvbuf_cnt;
- unsigned long repost_found_skb_cnt;
- unsigned long n_repost_deficit;
- unsigned long bad_rcv_buf;
- unsigned long n_rcv_packets_not_accepted;
-
- int queuefullmsg_logged;
- struct chanstat chstat;
- struct napi_struct napi;
- struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
-};
-
-/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
-static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u16 inp_len,
- u16 index, u16 max_pi_arr_entries,
- struct phys_info pi_arr[])
-{
- u16 i, len, firstlen;
-
- firstlen = PI_PAGE_SIZE - inp_off;
- if (inp_len <= firstlen) {
- /* The input entry spans only one page - add as is. */
- if (index >= max_pi_arr_entries)
- return 0;
- pi_arr[index].pi_pfn = inp_pfn;
- pi_arr[index].pi_off = (u16)inp_off;
- pi_arr[index].pi_len = (u16)inp_len;
- return index + 1;
- }
-
- /* This entry spans multiple pages. */
- for (len = inp_len, i = 0; len;
- len -= pi_arr[index + i].pi_len, i++) {
- if (index + i >= max_pi_arr_entries)
- return 0;
- pi_arr[index + i].pi_pfn = inp_pfn + i;
- if (i == 0) {
- pi_arr[index].pi_off = inp_off;
- pi_arr[index].pi_len = firstlen;
- } else {
- pi_arr[index + i].pi_off = 0;
- pi_arr[index + i].pi_len = min_t(u16, len,
- PI_PAGE_SIZE);
- }
- }
- return index + i;
-}
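To make the page-splitting above concrete, here is a hypothetical worked example (4096 stands in for PI_PAGE_SIZE, and the starting pfn is made up): an input fragment at page offset 3000 with length 6000 becomes three entries, (pfn, 3000, 1096), (pfn + 1, 0, 4096) and (pfn + 2, 0, 808):

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096	/* stand-in for PI_PAGE_SIZE */

int main(void)
{
	unsigned long pfn = 0x1234;		/* hypothetical starting pfn */
	unsigned int off = 3000, len = 6000;
	unsigned int firstlen = DEMO_PAGE_SIZE - off;
	unsigned int i = 0, left = len;

	while (left) {
		unsigned int chunk;

		/* first entry covers the rest of the first page, later ones whole pages */
		if (i == 0)
			chunk = len <= firstlen ? len : firstlen;
		else
			chunk = left < DEMO_PAGE_SIZE ? left : DEMO_PAGE_SIZE;
		printf("entry %u: pfn 0x%lx off %u len %u\n",
		       i, pfn + i, i == 0 ? off : 0, chunk);
		left -= chunk;
		i++;
	}
	return 0;
}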
-
-/* visor_copy_fragsinfo_from_skb - copy fragment list in the SKB to a phys_info
- * array that the IOPART understands
- * @skb: Skbuff that we are pulling the frags from.
- * @firstfraglen: Length of first fragment in skb.
- * @frags_max: Max len of frags array.
- * @frags: Frags array filled in on output.
- *
- * Return: Positive integer indicating number of entries filled in frags on
- * success, negative integer on error.
- */
-static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
- unsigned int firstfraglen,
- unsigned int frags_max,
- struct phys_info frags[])
-{
- unsigned int count = 0, frag, size, offset = 0, numfrags;
- unsigned int total_count;
-
- numfrags = skb_shinfo(skb)->nr_frags;
-
- /* Compute the number of fragments this skb has, and if it's more than
- * the frag array can hold, linearize the skb
- */
- total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
- if (firstfraglen % PI_PAGE_SIZE)
- total_count++;
-
- if (total_count > frags_max) {
- if (skb_linearize(skb))
- return -EINVAL;
- numfrags = skb_shinfo(skb)->nr_frags;
- firstfraglen = 0;
- }
-
- while (firstfraglen) {
- if (count == frags_max)
- return -EINVAL;
-
- frags[count].pi_pfn =
- page_to_pfn(virt_to_page(skb->data + offset));
- frags[count].pi_off =
- (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
- size = min_t(unsigned int, firstfraglen,
- PI_PAGE_SIZE - frags[count].pi_off);
-
- /* can take smallest of firstfraglen (what's left) OR
- * bytes left in the page
- */
- frags[count].pi_len = size;
- firstfraglen -= size;
- offset += size;
- count++;
- }
- if (numfrags) {
- if ((count + numfrags) > frags_max)
- return -EINVAL;
-
- for (frag = 0; frag < numfrags; frag++) {
- count = add_physinfo_entries(page_to_pfn(
- skb_frag_page(&skb_shinfo(skb)->frags[frag])),
- skb_frag_off(&skb_shinfo(skb)->frags[frag]),
- skb_frag_size(&skb_shinfo(skb)->frags[frag]),
- count, frags_max, frags);
- /* add_physinfo_entries only returns
- * zero if the frags array is out of room.
- * That should never happen because we
- * fail above if count + numfrags > frags_max.
- */
- if (!count)
- return -EINVAL;
- }
- }
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *skbinlist;
- int c;
-
- for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
- skbinlist = skbinlist->next) {
- c = visor_copy_fragsinfo_from_skb(skbinlist,
- skbinlist->len -
- skbinlist->data_len,
- frags_max - count,
- &frags[count]);
- if (c < 0)
- return c;
- count += c;
- }
- }
- return count;
-}
-
-static ssize_t enable_ints_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- /* Don't want to break the ABI here by having a debugfs
- * file that no longer exists or is no longer writable, so
- * let's just make this a vestigial function
- */
- return count;
-}
-
-static const struct file_operations debugfs_enable_ints_fops = {
- .write = enable_ints_write,
-};
-
-/* visornic_serverdown_complete - pause device following IOPART going down
- * @devdata: Device managed by IOPART.
- *
- * The IO partition has gone down, and we need to do some cleanup for when it
- * comes back. Treat the IO partition as the link being down.
- */
-static void visornic_serverdown_complete(struct visornic_devdata *devdata)
-{
- struct net_device *netdev = devdata->netdev;
-
- /* Stop polling for interrupts */
- visorbus_disable_channel_interrupts(devdata->dev);
-
- rtnl_lock();
- dev_close(netdev);
- rtnl_unlock();
-
- atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
- devdata->chstat.sent_xmit = 0;
- devdata->chstat.got_xmit_done = 0;
-
- if (devdata->server_down_complete_func)
- (*devdata->server_down_complete_func)(devdata->dev, 0);
-
- devdata->server_down = true;
- devdata->server_change_state = false;
- devdata->server_down_complete_func = NULL;
-}
-
-/* visornic_serverdown - Command has notified us that IOPART is down
- * @devdata: Device managed by IOPART.
- * @complete_func: Function to call when finished.
- *
- * Schedule the work needed to handle the server down request. Make sure we
- * haven't already handled the server change state event.
- *
- * Return: 0 if we scheduled the work, negative integer on error.
- */
-static int visornic_serverdown(struct visornic_devdata *devdata,
- visorbus_state_complete_func complete_func)
-{
- unsigned long flags;
- int err;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->server_change_state) {
- dev_dbg(&devdata->dev->device, "%s changing state\n",
- __func__);
- err = -EINVAL;
- goto err_unlock;
- }
- if (devdata->server_down) {
- dev_dbg(&devdata->dev->device, "%s already down\n",
- __func__);
- err = -EINVAL;
- goto err_unlock;
- }
- if (devdata->going_away) {
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- err = -ENODEV;
- goto err_unlock;
- }
- devdata->server_change_state = true;
- devdata->server_down_complete_func = complete_func;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- visornic_serverdown_complete(devdata);
- return 0;
-
-err_unlock:
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return err;
-}
-
-/* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition
- * @netdev: Network adapter the rcv bufs are attached to.
- *
- * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
- * so that it can write rcv data into our memory space.
- *
- * Return: Pointer to sk_buff.
- */
-static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
-{
- struct sk_buff *skb;
-
- /* NOTE: the first fragment in each rcv buffer is pointed to by
- * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
- * in length, so the first frag is large enough to hold 1514.
- */
- skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
- skb->dev = netdev;
- /* current value of mtu doesn't come into play here; large
- * packets will just end up using multiple rcv buffers all of
- * the same size.
- */
- skb->len = RCVPOST_BUF_SIZE;
- /* alloc_skb already zeroes data_len; this assignment is just for clarity. */
- skb->data_len = 0;
- return skb;
-}
-
-/* post_skb - post a skb to the IO Partition
- * @cmdrsp: Cmdrsp packet to be sent to the IO Partition.
- * @devdata: visornic_devdata to post the skb to.
- * @skb: Skb to give to the IO partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
- struct sk_buff *skb)
-{
- int err;
-
- cmdrsp->net.buf = skb;
- cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
- cmdrsp->net.rcvpost.frag.pi_off =
- (unsigned long)skb->data & PI_PAGE_MASK;
- cmdrsp->net.rcvpost.frag.pi_len = skb->len;
- cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
-
- if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
- return -EINVAL;
-
- cmdrsp->net.type = NET_RCV_POST;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- if (err) {
- devdata->chstat.sent_post_failed++;
- return err;
- }
-
- atomic_inc(&devdata->num_rcvbuf_in_iovm);
- devdata->chstat.sent_post++;
- return 0;
-}
-
-/* send_enbdis - Send NET_RCV_ENBDIS to IO Partition
- * @netdev: Netdevice we are enabling/disabling, used as context return value.
- * @state: Enable = 1/disable = 0.
- * @devdata: Visornic device we are enabling/disabling.
- *
- * Send the enable/disable message to the IO Partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int send_enbdis(struct net_device *netdev, int state,
- struct visornic_devdata *devdata)
-{
- int err;
-
- devdata->cmdrsp_rcv->net.enbdis.enable = state;
- devdata->cmdrsp_rcv->net.enbdis.context = netdev;
- devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
- devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv);
- if (err)
- return err;
- devdata->chstat.sent_enbdis++;
- return 0;
-}
-
-/* visornic_disable_with_timeout - disable network adapter
- * @netdev: netdevice to disable.
- * @timeout: Timeout to wait for disable.
- *
- * Disable the network adapter and inform the IO Partition that we are disabled.
- * Reclaim memory from rcv bufs.
- *
- * Return: 0 on success, negative integer if the IO Partition fails to respond.
- */
-static int visornic_disable_with_timeout(struct net_device *netdev,
- const int timeout)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
- int i;
- unsigned long flags;
- int wait = 0;
- int err;
-
- /* send a msg telling the other end we are stopping incoming pkts */
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = 0;
- /* must wait for ack */
- devdata->enab_dis_acked = 0;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* send disable and wait for ack -- don't hold lock when sending
- * disable because if the queue is full, insert might sleep.
- * If an error occurs, don't wait for the timeout.
- */
- err = send_enbdis(netdev, 0, devdata);
- if (err)
- return err;
-
- /* wait for ack to arrive before we try to free rcv buffers
- * NOTE: the other end automatically unposts the rcv buffers
- * when it gets a disable.
- */
- spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
- (wait < timeout)) {
- if (devdata->enab_dis_acked)
- break;
- if (devdata->server_down || devdata->server_change_state) {
- dev_dbg(&netdev->dev, "%s server went away\n",
- __func__);
- break;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
- /* Wait for usage to go to 1 (no other users) before freeing
- * rcv buffers
- */
- if (atomic_read(&devdata->usage) > 1) {
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (atomic_read(&devdata->usage))
- break;
- }
- }
- /* we've set enabled to 0, so we can give up the lock. */
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* stop the transmit queue so nothing more can be transmitted */
- netif_stop_queue(netdev);
-
- napi_disable(&devdata->napi);
-
- skb_queue_purge(&devdata->xmitbufhead);
-
- /* Free rcv buffers - other end has automatically unposted them on
- * disable
- */
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i]) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- }
- }
-
- return 0;
-}
-
-/* init_rcv_bufs - initialize receive buffers and send them to the IO Partition
- * @netdev: struct netdevice.
- * @devdata: visornic_devdata.
- *
- * Allocate rcv buffers and post them to the IO Partition.
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int init_rcv_bufs(struct net_device *netdev,
- struct visornic_devdata *devdata)
-{
- int i, j, count, err;
-
- /* allocate a fixed number of receive buffers to post to uisnic;
- * post the receive buffers after we've allocated the required amount
- */
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- /* if we failed to allocate one let us stop */
- if (!devdata->rcvbuf[i])
- break;
- }
- /* couldn't even allocate one -- bail out */
- if (i == 0)
- return -ENOMEM;
- count = i;
-
- /* Ensure we can alloc 2/3rd of the requested number of buffers.
- * 2/3 is an arbitrary choice; used also in ndis init.c
- */
- if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
- /* free receive buffers we did alloc and then bail out */
- for (i = 0; i < count; i++) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- }
- return -ENOMEM;
- }
-
- /* post receive buffers to receive incoming input - without holding
- * lock - we've not enabled nor started the queue so there shouldn't
- * be any rcv or xmit activity
- */
- for (i = 0; i < count; i++) {
- err = post_skb(devdata->cmdrsp_rcv, devdata,
- devdata->rcvbuf[i]);
- if (!err)
- continue;
-
- /* Error handling -
- * If we posted at least one skb, we should return success,
- * but need to free the resources that we have not successfully
- * posted.
- */
- for (j = i; j < count; j++) {
- kfree_skb(devdata->rcvbuf[j]);
- devdata->rcvbuf[j] = NULL;
- }
- if (i == 0)
- return err;
- break;
- }
-
- return 0;
-}
-
-/* visornic_enable_with_timeout - send enable to IO Partition
- * @netdev: struct net_device.
- * @timeout: Time to wait for the ACK from the enable.
- *
- * Allocates and posts receive buffers to the IOVM and sends it an enable. The
- * timeout is in msecs (a timeout of 0 specifies an infinite wait).
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int visornic_enable_with_timeout(struct net_device *netdev,
- const int timeout)
-{
- int err = 0;
- struct visornic_devdata *devdata = netdev_priv(netdev);
- unsigned long flags;
- int wait = 0;
-
- napi_enable(&devdata->napi);
-
- /* NOTE: the other end automatically unposts the rcv buffers when it
- * gets a disable.
- */
- err = init_rcv_bufs(netdev, devdata);
- if (err < 0) {
- dev_err(&netdev->dev,
- "%s failed to init rcv bufs\n", __func__);
- return err;
- }
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = 1;
- devdata->enab_dis_acked = 0;
-
- /* now we're ready, let's send an ENB to uisnic but until we get
- * an ACK back from uisnic, we'll drop the packets
- */
- devdata->n_rcv_packets_not_accepted = 0;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* send enable and wait for ack -- don't hold lock when sending enable
- * because if the queue is full, insert might sleep. If an error
- * occurs, error out.
- */
- err = send_enbdis(netdev, 1, devdata);
- if (err)
- return err;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
- (wait < timeout)) {
- if (devdata->enab_dis_acked)
- break;
- if (devdata->server_down || devdata->server_change_state) {
- dev_dbg(&netdev->dev, "%s server went away\n",
- __func__);
- break;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- if (!devdata->enab_dis_acked) {
- dev_err(&netdev->dev, "%s missing ACK\n", __func__);
- return -EIO;
- }
-
- netif_start_queue(netdev);
- return 0;
-}
-
-/* visornic_timeout_reset - handle xmit timeout resets
- * @work: Work item that scheduled the work.
- *
- * Transmit timeouts are typically handled by resetting the device for our
- * virtual NIC; we will send a disable and enable to the IOVM. If it doesn't
- * respond, we will trigger a serverdown.
- */
-static void visornic_timeout_reset(struct work_struct *work)
-{
- struct visornic_devdata *devdata;
- struct net_device *netdev;
- int response = 0;
-
- devdata = container_of(work, struct visornic_devdata, timeout_reset);
- netdev = devdata->netdev;
-
- rtnl_lock();
- if (!netif_running(netdev)) {
- rtnl_unlock();
- return;
- }
-
- response = visornic_disable_with_timeout(netdev,
- VISORNIC_INFINITE_RSP_WAIT);
- if (response)
- goto call_serverdown;
-
- response = visornic_enable_with_timeout(netdev,
- VISORNIC_INFINITE_RSP_WAIT);
- if (response)
- goto call_serverdown;
-
- rtnl_unlock();
-
- return;
-
-call_serverdown:
- visornic_serverdown(devdata, NULL);
- rtnl_unlock();
-}
-
-/* visornic_open - enable the visornic device and mark the queue started
- * @netdev: netdevice to start.
- *
- * Enable the device and start the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_open(struct net_device *netdev)
-{
- visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
- return 0;
-}
-
-/* visornic_close - disables the visornic device and stops the queues
- * @netdev: netdevice to stop.
- *
- * Disable the device and stop the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_close(struct net_device *netdev)
-{
- visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
- return 0;
-}
-
-/* devdata_xmits_outstanding - compute outstanding xmits
- * @devdata: visornic_devdata for device
- *
- * Return: Long integer representing the number of outstanding xmits.
- */
-static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
-{
- if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
- return devdata->chstat.sent_xmit -
- devdata->chstat.got_xmit_done;
- return (ULONG_MAX - devdata->chstat.got_xmit_done
- + devdata->chstat.sent_xmit + 1);
-}
-
-/* vnic_hit_high_watermark
- * @devdata: Indicates visornic device we are checking.
- * @high_watermark: Max num of unacked xmits we will tolerate before we will
- * start throttling.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is >=
- * high_watermark. False otherwise.
- */
-static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
- ulong high_watermark)
-{
- return (devdata_xmits_outstanding(devdata) >= high_watermark);
-}
-
-/* vnic_hit_low_watermark
- * @devdata: Indicates visornic device we are checking.
- * @low_watermark: We will wait until the num of unacked xmits drops to this
- * value or lower before we start transmitting again.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is <=
- * low_watermark.
- */
-static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
- ulong low_watermark)
-{
- return (devdata_xmits_outstanding(devdata) <= low_watermark);
-}
-
-/* visornic_xmit - send a packet to the IO Partition
- * @skb: Packet to be sent.
- * @netdev: Net device the packet is being sent from.
- *
- * Convert the skb to a cmdrsp so the IO Partition can understand it, and send
- * the XMIT command to the IO Partition for processing. This function is
- * protected from concurrent calls by a spinlock xmit_lock in the net_device
- * struct. As soon as the function returns, it can be called again.
- *
- * Return: NETDEV_TX_OK.
- */
-static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- struct visornic_devdata *devdata;
- int len, firstfraglen, padlen;
- struct uiscmdrsp *cmdrsp = NULL;
- unsigned long flags;
- int err;
-
- devdata = netdev_priv(netdev);
- spin_lock_irqsave(&devdata->priv_lock, flags);
-
- if (netif_queue_stopped(netdev) || devdata->server_down ||
- devdata->server_change_state) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - queue stopped\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* sk_buff struct is used to host network data throughout all the
- * linux network subsystems
- */
- len = skb->len;
-
- /* skb->len is the FULL length of data (including fragmentary portion)
- * skb->data_len is the length of the fragment portion in frags
- * skb->len - skb->data_len is size of the 1st fragment in skb->data
- * calculate the length of the first fragment that skb->data is
- * pointing to
- */
- firstfraglen = skb->len - skb->data_len;
- if (firstfraglen < ETH_HLEN) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_err(&netdev->dev,
- "%s busy - first frag too small (%d)\n",
- __func__, firstfraglen);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- if (len < ETH_MIN_PACKET_SIZE &&
- ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
- /* pad the packet out to minimum size */
- padlen = ETH_MIN_PACKET_SIZE - len;
- skb_put_zero(skb, padlen);
- len += padlen;
- firstfraglen += padlen;
- }
-
- cmdrsp = devdata->xmit_cmdrsp;
- /* clear cmdrsp */
- memset(cmdrsp, 0, SIZEOF_CMDRSP);
- cmdrsp->net.type = NET_XMIT;
- cmdrsp->cmdtype = CMD_NET_TYPE;
-
- /* save the pointer to skb -- we'll need it for completion */
- cmdrsp->net.buf = skb;
-
- if (vnic_hit_high_watermark(devdata,
- devdata->max_outstanding_net_xmits)) {
- /* extra NET_XMITs queued over to IOVM - need to wait */
- devdata->chstat.reject_count++;
- if (!devdata->queuefullmsg_logged &&
- ((devdata->chstat.reject_count & 0x3ff) == 1))
- devdata->queuefullmsg_logged = 1;
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - waiting for iovm to catch up\n",
- __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- if (devdata->queuefullmsg_logged)
- devdata->queuefullmsg_logged = 0;
-
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- cmdrsp->net.xmt.lincsum.valid = 1;
- cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
- if (skb_transport_header(skb) > skb->data) {
- cmdrsp->net.xmt.lincsum.hrawoff =
- skb_transport_header(skb) - skb->data;
- cmdrsp->net.xmt.lincsum.hrawoffv = 1;
- }
- if (skb_network_header(skb) > skb->data) {
- cmdrsp->net.xmt.lincsum.nhrawoff =
- skb_network_header(skb) - skb->data;
- cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
- }
- cmdrsp->net.xmt.lincsum.csum = skb->csum;
- } else {
- cmdrsp->net.xmt.lincsum.valid = 0;
- }
-
- /* save off the length of the entire data packet */
- cmdrsp->net.xmt.len = len;
-
- /* copy ethernet header from first frag into cmdrsp
- * - everything else will be passed in frags & DMA'ed
- */
- memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
-
- /* copy frags info - from skb->data we need to only provide access
- * beyond eth header
- */
- cmdrsp->net.xmt.num_frags =
- visor_copy_fragsinfo_from_skb(skb, firstfraglen,
- MAX_PHYS_INFO,
- cmdrsp->net.xmt.frags);
- if (cmdrsp->net.xmt.num_frags < 0) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_err(&netdev->dev,
- "%s busy - copy frags failed\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART, cmdrsp);
- if (err) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - signalinsert failed\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* Track the skbs that have been sent to the IOVM for XMIT */
- skb_queue_head(&devdata->xmitbufhead, skb);
-
- /* update xmt stats */
- devdata->net_stats.tx_packets++;
- devdata->net_stats.tx_bytes += skb->len;
- devdata->chstat.sent_xmit++;
-
- /* check if we have hit the high watermark for netif_stop_queue() */
- if (vnic_hit_high_watermark(devdata,
- devdata->upper_threshold_net_xmits)) {
- /* extra NET_XMITs queued over to IOVM - need to wait */
- /* stop queue - call netif_wake_queue() after lower threshold */
- netif_stop_queue(netdev);
- dev_dbg(&netdev->dev,
- "%s busy - invoking iovm flow control\n",
- __func__);
- devdata->flow_control_upper_hits++;
- }
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* skb will be freed when we get back NET_XMIT_DONE */
- return NETDEV_TX_OK;
-}
-
-/* visornic_get_stats - returns net_stats of the visornic device
- * @netdev: netdevice.
- *
- * Return: Pointer to the net_device_stats struct for the device.
- */
-static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
-
- return &devdata->net_stats;
-}
-
-/* visornic_change_mtu - changes mtu of device
- * @netdev: netdevice.
- * @new_mtu: Value of new mtu.
- *
- * The device's MTU cannot be changed by the system; it must be changed via a
- * CONTROLVM message. All vnics and pnics in a switch have to have the same MTU
- * for everything to work. Currently not supported.
- *
- * Return: -EINVAL.
- */
-static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
-{
- return -EINVAL;
-}
-
-/* visornic_set_multi - set visornic device flags
- * @netdev: netdevice.
- *
- * The only flag we currently support is IFF_PROMISC.
- */
-static void visornic_set_multi(struct net_device *netdev)
-{
- struct uiscmdrsp *cmdrsp;
- struct visornic_devdata *devdata = netdev_priv(netdev);
- int err = 0;
-
- if (devdata->old_flags == netdev->flags)
- return;
-
- if ((netdev->flags & IFF_PROMISC) ==
- (devdata->old_flags & IFF_PROMISC))
- goto out_save_flags;
-
- cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
- if (!cmdrsp)
- return;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- cmdrsp->net.type = NET_RCV_PROMISC;
- cmdrsp->net.enbdis.context = netdev;
- cmdrsp->net.enbdis.enable =
- netdev->flags & IFF_PROMISC;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- kfree(cmdrsp);
- if (err)
- return;
-
-out_save_flags:
- devdata->old_flags = netdev->flags;
-}
-
-/* visornic_xmit_timeout - request to timeout the xmit
- * @netdev: netdevice.
- *
- * Queue the work and return. Make sure we have not already been informed that
- * the IO Partition is gone; if so, we will have already timed-out the xmits.
- */
-static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
- unsigned long flags;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- return;
- }
-
- /* Ensure that a ServerDown message hasn't been received */
- if (!devdata->enabled ||
- (devdata->server_down && !devdata->server_change_state)) {
- dev_dbg(&netdev->dev, "%s no processing\n",
- __func__);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return;
- }
- schedule_work(&devdata->timeout_reset);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* repost_return - repost rcv bufs that have come back
- * @cmdrsp: IO channel command struct to post.
- * @devdata: Visornic devdata for the device.
- * @skb: Socket buffer.
- * @netdev: netdevice.
- *
- * Repost rcv buffers that have been returned to us when we are finished
- * with them.
- *
- * Return: 0 for success, negative integer on error.
- */
-static int repost_return(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata,
- struct sk_buff *skb, struct net_device *netdev)
-{
- struct net_pkt_rcv copy;
- int i = 0, cc, numreposted;
- int found_skb = 0;
- int status = 0;
-
- copy = cmdrsp->net.rcv;
- switch (copy.numrcvbufs) {
- case 0:
- devdata->n_rcv0++;
- break;
- case 1:
- devdata->n_rcv1++;
- break;
- case 2:
- devdata->n_rcv2++;
- break;
- default:
- devdata->n_rcvx++;
- break;
- }
- for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
- continue;
-
- if ((skb) && devdata->rcvbuf[i] == skb) {
- devdata->found_repost_rcvbuf_cnt++;
- found_skb = 1;
- devdata->repost_found_skb_cnt++;
- }
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- if (!devdata->rcvbuf[i]) {
- devdata->num_rcv_bufs_could_not_alloc++;
- devdata->alloc_failed_in_repost_rtn_cnt++;
- status = -ENOMEM;
- break;
- }
- status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
- if (status) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- break;
- }
- numreposted++;
- break;
- }
- }
- if (numreposted != copy.numrcvbufs) {
- devdata->n_repost_deficit++;
- status = -EINVAL;
- }
- if (skb) {
- if (found_skb) {
- kfree_skb(skb);
- } else {
- status = -EINVAL;
- devdata->bad_rcv_buf++;
- }
- }
- return status;
-}
-
-/* visornic_rx - handle receive packets coming back from IO Partition
- * @cmdrsp: Receive packet returned from IO Partition.
- *
- * Got a receive packet back from the IO Partition; handle it and send it up
- * the stack.
- *
- * Return: 1 iff an skb was received, otherwise 0.
- */
-static int visornic_rx(struct uiscmdrsp *cmdrsp)
-{
- struct visornic_devdata *devdata;
- struct sk_buff *skb, *prev, *curr;
- struct net_device *netdev;
- int cc, currsize, off;
- struct ethhdr *eth;
- unsigned long flags;
-
- /* Post a new rcv buf to the other end using the cmdrsp we have at hand.
- * Post it without holding the lock - we'll use the signal lock to
- * synchronize the queue insert. The cmdrsp that contains the net.rcv
- * is the one we are using to repost, so copy the info we need from it.
- */
- skb = cmdrsp->net.buf;
- netdev = skb->dev;
-
- devdata = netdev_priv(netdev);
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- atomic_dec(&devdata->num_rcvbuf_in_iovm);
-
- /* set length to how much was ACTUALLY received -
- * NOTE: rcv_done_len includes actual length of data rcvd
- * including ethhdr
- */
- skb->len = cmdrsp->net.rcv.rcv_done_len;
-
- /* update rcv stats - call it with priv_lock held */
- devdata->net_stats.rx_packets++;
- devdata->net_stats.rx_bytes += skb->len;
-
- /* test enabled while holding lock */
- if (!(devdata->enabled && devdata->enab_dis_acked)) {
- /* don't process it unless we're in enable mode and until
- * we've gotten an ACK saying the other end got our RCV enable
- */
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- repost_return(cmdrsp, devdata, skb, netdev);
- return 0;
- }
-
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* When the skb was allocated, skb->dev, skb->data, skb->len and
- * skb->data_len were set up, and the data had already been put into
- * the skb (both the first frag and the frags pages).
- * NOTE: firstfraglen is the amount of data in skb->data, i.e. that
- * which is not in nr_frags or frag_list. This is now simply
- * RCVPOST_BUF_SIZE. Bump tail to show how much data is in
- * firstfrag & set data_len to show the rest; see if we have to chain
- * frag_list.
- */
- /* do PRECAUTIONARY check */
- if (skb->len > RCVPOST_BUF_SIZE) {
- if (cmdrsp->net.rcv.numrcvbufs < 2) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev,
- "repost_return failed");
- return 0;
- }
- /* length rcvd is greater than firstfrag in this skb rcv buf */
- /* amount in skb->data */
- skb->tail += RCVPOST_BUF_SIZE;
- /* amount that will be in frag_list */
- skb->data_len = skb->len - RCVPOST_BUF_SIZE;
- } else {
- /* data fits in this skb - no chaining - do
- * PRECAUTIONARY check
- */
- /* should be 1 */
- if (cmdrsp->net.rcv.numrcvbufs != 1) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev,
- "repost_return failed");
- return 0;
- }
- skb->tail += skb->len;
- /* nothing rcvd in frag_list */
- skb->data_len = 0;
- }
- off = skb_tail_pointer(skb) - skb->data;
-
- /* off is the amount we bumped tail by in the head skb.
- * It is used to calculate the size of each chained skb below,
- * and also to index into bufline to continue the copy
- * (for chansocktwopc).
- * If necessary, chain the rcv skbs together.
- * NOTE: index 0 is the same as cmdrsp->net.rcv.skb; we need to
- * chain the rest to that one.
- * - do PRECAUTIONARY check
- */
- if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev, "repost_return failed");
- return 0;
- }
-
- if (cmdrsp->net.rcv.numrcvbufs > 1) {
- /* chain the various rcv buffers into the skb's frag_list. */
- /* Note: off was initialized above */
- for (cc = 1, prev = NULL;
- cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
- curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
- curr->next = NULL;
- /* start of list- set head */
- if (!prev)
- skb_shinfo(skb)->frag_list = curr;
- else
- prev->next = curr;
- prev = curr;
-
- /* should we set skb->len and skb->data_len for each
- * buffer being chained??? can't hurt!
- */
- currsize = min(skb->len - off,
- (unsigned int)RCVPOST_BUF_SIZE);
- curr->len = currsize;
- curr->tail += currsize;
- curr->data_len = 0;
- off += currsize;
- }
- /* assert skb->len == off */
- if (skb->len != off) {
- netdev_err(devdata->netdev,
- "something wrong; skb->len:%d != off:%d\n",
- skb->len, off);
- }
- }
-
- /* set up packet's protocol type using ethernet header - this
- * sets up skb->pkt_type & it also PULLS out the eth header
- */
- skb->protocol = eth_type_trans(skb, netdev);
- eth = eth_hdr(skb);
- skb->csum = 0;
- skb->ip_summed = CHECKSUM_NONE;
-
- do {
- /* accept all packets */
- if (netdev->flags & IFF_PROMISC)
- break;
- if (skb->pkt_type == PACKET_BROADCAST) {
- /* accept all broadcast packets */
- if (netdev->flags & IFF_BROADCAST)
- break;
- } else if (skb->pkt_type == PACKET_MULTICAST) {
- if ((netdev->flags & IFF_MULTICAST) &&
- (netdev_mc_count(netdev))) {
- struct netdev_hw_addr *ha;
- int found_mc = 0;
-
- /* only accept multicast packets that we can
- * find in our multicast address list
- */
- netdev_for_each_mc_addr(ha, netdev) {
- if (ether_addr_equal(eth->h_dest,
- ha->addr)) {
- found_mc = 1;
- break;
- }
- }
- /* accept pkt, dest matches a multicast addr */
- if (found_mc)
- break;
- }
- /* accept packet, h_dest must match vnic mac address */
- } else if (skb->pkt_type == PACKET_HOST) {
- break;
- } else if (skb->pkt_type == PACKET_OTHERHOST) {
- /* something is not right */
- dev_err(&devdata->netdev->dev,
- "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
- netdev->name, eth->h_dest, netdev->dev_addr);
- }
- /* drop packet - don't forward it up to OS */
- devdata->n_rcv_packets_not_accepted++;
- repost_return(cmdrsp, devdata, skb, netdev);
- return 0;
- } while (0);
-
- netif_receive_skb(skb);
- /* netif_receive_skb returns various values, but "in practice most
- * drivers ignore the return value".
- */
-
- skb = NULL;
- /* whether the packet got dropped or handled, the skb is freed by
- * kernel code, so we shouldn't free it, but we should repost a
- * new rcv buffer.
- */
- repost_return(cmdrsp, devdata, skb, netdev);
- return 1;
-}
-
-/* devdata_initialize - initialize devdata structure
- * @devdata: visornic_devdata structure to initialize.
- * @dev: visorbus_device it belongs to.
- *
- * Setup initial values for the visornic, based on channel and default values.
- *
- * Return: A pointer to the devdata structure.
- */
-static struct visornic_devdata *devdata_initialize(
- struct visornic_devdata *devdata,
- struct visor_device *dev)
-{
- devdata->dev = dev;
- devdata->incarnation_id = get_jiffies_64();
- return devdata;
-}
-
-/* devdata_release - free up references in devdata
- * @devdata: Struct to clean up.
- */
-static void devdata_release(struct visornic_devdata *devdata)
-{
- kfree(devdata->rcvbuf);
- kfree(devdata->cmdrsp_rcv);
- kfree(devdata->xmit_cmdrsp);
-}
-
-static const struct net_device_ops visornic_dev_ops = {
- .ndo_open = visornic_open,
- .ndo_stop = visornic_close,
- .ndo_start_xmit = visornic_xmit,
- .ndo_get_stats = visornic_get_stats,
- .ndo_change_mtu = visornic_change_mtu,
- .ndo_tx_timeout = visornic_xmit_timeout,
- .ndo_set_rx_mode = visornic_set_multi,
-};
-
-/* DebugFS code */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
-{
- ssize_t bytes_read = 0;
- int str_pos = 0;
- struct visornic_devdata *devdata;
- struct net_device *dev;
- char *vbuf;
-
- if (len > MAX_BUF)
- len = MAX_BUF;
- vbuf = kzalloc(len, GFP_KERNEL);
- if (!vbuf)
- return -ENOMEM;
-
- /* for each vnic channel dump out channel specific data */
- rcu_read_lock();
- for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
- /* Only consider netdevs that are visornic, and are open */
- if (dev->netdev_ops != &visornic_dev_ops ||
- (!netif_queue_stopped(dev)))
- continue;
-
- devdata = netdev_priv(dev);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "netdev = %s (0x%p), MAC Addr %pM\n",
- dev->name,
- dev,
- dev->dev_addr);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "VisorNic Dev Info = 0x%p\n", devdata);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcv_bufs = %d\n",
- devdata->num_rcv_bufs);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " max_outstanding_net_xmits = %lu\n",
- devdata->max_outstanding_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " upper_threshold_net_xmits = %lu\n",
- devdata->upper_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " lower_threshold_net_xmits = %lu\n",
- devdata->lower_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " queuefullmsg_logged = %d\n",
- devdata->queuefullmsg_logged);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_rcv = %lu\n",
- devdata->chstat.got_rcv);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_enbdisack = %lu\n",
- devdata->chstat.got_enbdisack);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_xmit_done = %lu\n",
- devdata->chstat.got_xmit_done);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.xmit_fail = %lu\n",
- devdata->chstat.xmit_fail);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_enbdis = %lu\n",
- devdata->chstat.sent_enbdis);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_promisc = %lu\n",
- devdata->chstat.sent_promisc);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_post = %lu\n",
- devdata->chstat.sent_post);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_post_failed = %lu\n",
- devdata->chstat.sent_post_failed);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_xmit = %lu\n",
- devdata->chstat.sent_xmit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.reject_count = %lu\n",
- devdata->chstat.reject_count);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.extra_rcvbufs_sent = %lu\n",
- devdata->chstat.extra_rcvbufs_sent);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv0 = %lu\n", devdata->n_rcv0);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv1 = %lu\n", devdata->n_rcv1);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv2 = %lu\n", devdata->n_rcv2);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcvx = %lu\n", devdata->n_rcvx);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcvbuf_in_iovm = %d\n",
- atomic_read(&devdata->num_rcvbuf_in_iovm));
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_if_needed_cnt = %lu\n",
- devdata->alloc_failed_in_if_needed_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_repost_rtn_cnt = %lu\n",
- devdata->alloc_failed_in_repost_rtn_cnt);
- /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- * " inner_loop_limit_reached_cnt = %lu\n",
- * devdata->inner_loop_limit_reached_cnt);
- */
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " found_repost_rcvbuf_cnt = %lu\n",
- devdata->found_repost_rcvbuf_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " repost_found_skb_cnt = %lu\n",
- devdata->repost_found_skb_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_repost_deficit = %lu\n",
- devdata->n_repost_deficit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " bad_rcv_buf = %lu\n",
- devdata->bad_rcv_buf);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv_packets_not_accepted = %lu\n",
- devdata->n_rcv_packets_not_accepted);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_rcvd = %llu\n",
- devdata->interrupts_rcvd);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_notme = %llu\n",
- devdata->interrupts_notme);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_disabled = %llu\n",
- devdata->interrupts_disabled);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " busy_cnt = %llu\n",
- devdata->busy_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_upper_hits = %llu\n",
- devdata->flow_control_upper_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_lower_hits = %llu\n",
- devdata->flow_control_lower_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " netif_queue = %s\n",
- netif_queue_stopped(devdata->netdev) ?
- "stopped" : "running");
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " xmits_outstanding = %lu\n",
- devdata_xmits_outstanding(devdata));
- }
- rcu_read_unlock();
- bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
- kfree(vbuf);
- return bytes_read;
-}
-
-static struct dentry *visornic_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
- .read = info_debugfs_read,
-};
-
-/* send_rcv_posts_if_needed - send receive buffers to the IO Partition.
- * @devdata: Visornic device.
- */
-static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
-{
- int i;
- struct net_device *netdev;
- struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
- int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
- int err;
-
- /* don't do this until vnic is marked ready */
- if (!(devdata->enabled && devdata->enab_dis_acked))
- return;
-
- netdev = devdata->netdev;
- rcv_bufs_allocated = 0;
- /* this code is trying to prevent getting stuck here forever,
- * but still retries if it can't allocate them all this time.
- */
- cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
- while (cur_num_rcv_bufs_to_alloc > 0) {
- cur_num_rcv_bufs_to_alloc--;
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i])
- continue;
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- if (!devdata->rcvbuf[i]) {
- devdata->alloc_failed_in_if_needed_cnt++;
- break;
- }
- rcv_bufs_allocated++;
- err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
- if (err) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- break;
- }
- devdata->chstat.extra_rcvbufs_sent++;
- }
- }
- devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
-}
-
-/* drain_resp_queue - drains and ignores all messages from the resp queue
- * @cmdrsp: IO channel command response message.
- * @devdata: Visornic device to drain.
- */
-static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata)
-{
- while (!visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- ;
-}
-
-/* service_resp_queue - drain the response queue
- * @cmdrsp: IO channel command response message.
- * @devdata: Visornic device to drain.
- * @rx_work_done: Incremented by the number of receive packets processed.
- * @budget: NAPI budget; the maximum number of receive packets to process.
- *
- * Drain the response queue of any responses from the IO Partition. Process the
- * responses as we get them.
- */
-static void service_resp_queue(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata,
- int *rx_work_done, int budget)
-{
- unsigned long flags;
- struct net_device *netdev;
-
- while (*rx_work_done < budget) {
- /* TODO: CLIENT ACQUIRE -- Don't really need this at the
- * moment
- */
- /* queue empty */
- if (visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- break;
-
- switch (cmdrsp->net.type) {
- case NET_RCV:
- devdata->chstat.got_rcv++;
- /* process incoming packet */
- *rx_work_done += visornic_rx(cmdrsp);
- break;
- case NET_XMIT_DONE:
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->chstat.got_xmit_done++;
- if (cmdrsp->net.xmtdone.xmt_done_result)
- devdata->chstat.xmit_fail++;
- /* only call queue wake if we stopped it */
- netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
- /* ASSERT netdev == vnicinfo->netdev; */
- if (netdev == devdata->netdev &&
- netif_queue_stopped(netdev)) {
- /* check if we have crossed the lower watermark
- * for netif_wake_queue()
- */
- if (vnic_hit_low_watermark
- (devdata,
- devdata->lower_threshold_net_xmits)) {
- /* enough NET_XMITs completed
- * so can restart netif queue
- */
- netif_wake_queue(netdev);
- devdata->flow_control_lower_hits++;
- }
- }
- skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- kfree_skb(cmdrsp->net.buf);
- break;
- case NET_RCV_ENBDIS_ACK:
- devdata->chstat.got_enbdisack++;
- netdev = (struct net_device *)
- cmdrsp->net.enbdis.context;
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enab_dis_acked = 1;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- if (devdata->server_down &&
- devdata->server_change_state) {
- /* Inform Linux that the link is up */
- devdata->server_down = false;
- devdata->server_change_state = false;
- netif_wake_queue(netdev);
- netif_carrier_on(netdev);
- }
- break;
- case NET_CONNECT_STATUS:
- netdev = devdata->netdev;
- if (cmdrsp->net.enbdis.enable == 1) {
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = cmdrsp->net.enbdis.enable;
- spin_unlock_irqrestore(&devdata->priv_lock,
- flags);
- netif_wake_queue(netdev);
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = cmdrsp->net.enbdis.enable;
- spin_unlock_irqrestore(&devdata->priv_lock,
- flags);
- }
- break;
- default:
- break;
- }
- /* cmdrsp is now available for reuse */
- }
-}
-
-static int visornic_poll(struct napi_struct *napi, int budget)
-{
- struct visornic_devdata *devdata = container_of(napi,
- struct visornic_devdata,
- napi);
- int rx_count = 0;
-
- send_rcv_posts_if_needed(devdata);
- service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
-
- /* If there aren't any more packets to receive, stop the poll */
- if (rx_count < budget)
- napi_complete_done(napi, rx_count);
-
- return rx_count;
-}
-
-/* visornic_channel_interrupt - checks the status of the response queue
- * @dev: Visor device whose response queue to check.
- *
- * Check the response queue and schedule NAPI to drain it if needed.
- */
-static void visornic_channel_interrupt(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- if (!visorchannel_signalempty(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART))
- napi_schedule(&devdata->napi);
-
- atomic_set(&devdata->interrupt_rcvd, 0);
-}
-
-/* visornic_probe - probe function for visornic devices
- * @dev: The visor device discovered.
- *
- * Called when visorbus discovers a visornic device on its bus. It creates a new
- * visornic ethernet adapter.
- *
- * Return: 0 on success, or negative integer on error.
- */
-static int visornic_probe(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = NULL;
- struct net_device *netdev = NULL;
- int err;
- int channel_offset = 0;
- u8 addr[ETH_ALEN];
- u64 features;
-
- netdev = alloc_etherdev(sizeof(struct visornic_devdata));
- if (!netdev) {
- dev_err(&dev->device,
- "%s alloc_etherdev failed\n", __func__);
- return -ENOMEM;
- }
-
- netdev->netdev_ops = &visornic_dev_ops;
- netdev->watchdog_timeo = 5 * HZ;
- SET_NETDEV_DEV(netdev, &dev->device);
-
- /* Get MAC address from channel and read it into the device. */
- netdev->addr_len = ETH_ALEN;
- channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
- err = visorbus_read_channel(dev, channel_offset, addr, ETH_ALEN);
- if (err < 0) {
- dev_err(&dev->device,
- "%s failed to get mac addr from chan (%d)\n",
- __func__, err);
- goto cleanup_netdev;
- }
- eth_hw_addr_set(netdev, addr);
-
- devdata = devdata_initialize(netdev_priv(netdev), dev);
- if (!devdata) {
- dev_err(&dev->device,
- "%s devdata_initialize failed\n", __func__);
- err = -ENOMEM;
- goto cleanup_netdev;
- }
- /* don't trust messages lying around in the channel */
- drain_resp_queue(devdata->cmdrsp, devdata);
-
- devdata->netdev = netdev;
- dev_set_drvdata(&dev->device, devdata);
- init_waitqueue_head(&devdata->rsp_queue);
- spin_lock_init(&devdata->priv_lock);
- /* not yet */
- devdata->enabled = 0;
- atomic_set(&devdata->usage, 1);
-
- /* Setup rcv bufs */
- channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
- err = visorbus_read_channel(dev, channel_offset,
- &devdata->num_rcv_bufs, 4);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get #rcv bufs from chan (%d)\n",
- __func__, err);
- goto cleanup_netdev;
- }
-
- devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
- sizeof(struct sk_buff *), GFP_KERNEL);
- if (!devdata->rcvbuf) {
- err = -ENOMEM;
- goto cleanup_netdev;
- }
-
- /* set the net_xmit outstanding threshold
- * always leave two slots open but you should have 3 at a minimum
- * note that max_outstanding_net_xmits must be > 0
- */
- devdata->max_outstanding_net_xmits =
- max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
- devdata->upper_threshold_net_xmits =
- max_t(unsigned long,
- 2, (devdata->max_outstanding_net_xmits - 1));
- devdata->lower_threshold_net_xmits =
- max_t(unsigned long,
- 1, (devdata->max_outstanding_net_xmits / 2));
-
- skb_queue_head_init(&devdata->xmitbufhead);
-
- /* create a cmdrsp we can use to post and unpost rcv buffers */
- devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
- if (!devdata->cmdrsp_rcv) {
- err = -ENOMEM;
- goto cleanup_rcvbuf;
- }
- devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
- if (!devdata->xmit_cmdrsp) {
- err = -ENOMEM;
- goto cleanup_cmdrsp_rcv;
- }
- INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
- devdata->server_down = false;
- devdata->server_change_state = false;
-
- /* set the default mtu */
- channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
- err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get mtu from chan (%d)\n",
- __func__, err);
- goto cleanup_xmit_cmdrsp;
- }
-
- /* TODO: Setup Interrupt information */
- /* Let's start our threads to get responses */
- netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
-
- channel_offset = offsetof(struct visor_io_channel,
- channel_header.features);
- err = visorbus_read_channel(dev, channel_offset, &features, 8);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get features from chan (%d)\n",
- __func__, err);
- goto cleanup_napi_add;
- }
-
- features |= VISOR_CHANNEL_IS_POLLING;
- features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
- err = visorbus_write_channel(dev, channel_offset, &features, 8);
- if (err) {
- dev_err(&dev->device,
- "%s failed to set features in chan (%d)\n",
- __func__, err);
- goto cleanup_napi_add;
- }
-
- /* Note: Interrupts have to be enabled before the netdev is
- * registered and opened, because the napi routine is responsible
- * for setting enab_dis_acked.
- */
- visorbus_enable_channel_interrupts(dev);
-
- err = register_netdev(netdev);
- if (err) {
- dev_err(&dev->device,
- "%s register_netdev failed (%d)\n", __func__, err);
- goto cleanup_napi_add;
- }
-
- /* create debug/sysfs directories */
- devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
- visornic_debugfs_dir);
- if (!devdata->eth_debugfs_dir) {
- dev_err(&dev->device,
- "%s debugfs_create_dir %s failed\n",
- __func__, netdev->name);
- err = -ENOMEM;
- goto cleanup_register_netdev;
- }
-
- dev_info(&dev->device, "%s success netdev=%s\n",
- __func__, netdev->name);
- return 0;
-
-cleanup_register_netdev:
- unregister_netdev(netdev);
-
-cleanup_napi_add:
- visorbus_disable_channel_interrupts(dev);
- netif_napi_del(&devdata->napi);
-
-cleanup_xmit_cmdrsp:
- kfree(devdata->xmit_cmdrsp);
-
-cleanup_cmdrsp_rcv:
- kfree(devdata->cmdrsp_rcv);
-
-cleanup_rcvbuf:
- kfree(devdata->rcvbuf);
-
-cleanup_netdev:
- free_netdev(netdev);
- return err;
-}
-
-/* host_side_disappeared - IO Partition is gone
- * @devdata: Device object.
- *
- * IO partition servicing this device is gone; do cleanup.
- */
-static void host_side_disappeared(struct visornic_devdata *devdata)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- /* indicate device destroyed */
- devdata->dev = NULL;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* visornic_remove - called when visornic dev goes away
- * @dev: Visornic device that is being removed.
- *
- * Called when DEVICE_DESTROY gets called to remove device.
- */
-static void visornic_remove(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
- struct net_device *netdev;
- unsigned long flags;
-
- if (!devdata) {
- dev_err(&dev->device, "%s no devdata\n", __func__);
- return;
- }
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s already being removed\n", __func__);
- return;
- }
- devdata->going_away = true;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- netdev = devdata->netdev;
- if (!netdev) {
- dev_err(&dev->device, "%s not net device\n", __func__);
- return;
- }
-
- /* going_away prevents new items being added to the workqueues */
- cancel_work_sync(&devdata->timeout_reset);
-
- debugfs_remove_recursive(devdata->eth_debugfs_dir);
- /* this will call visornic_close() */
- unregister_netdev(netdev);
-
- visorbus_disable_channel_interrupts(devdata->dev);
- netif_napi_del(&devdata->napi);
-
- dev_set_drvdata(&dev->device, NULL);
- host_side_disappeared(devdata);
- devdata_release(devdata);
- free_netdev(netdev);
-}
-
-/* visornic_pause - called when IO Part disappears
- * @dev: Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO Partition has gone down. Need to free up resources and
- * wait for IO partition to come back. Mark link as down and don't attempt any
- * DMA. When we have freed memory, call the complete_func so that Command knows
- * we are done. If we don't call complete_func, the IO Partition will never
- * come back.
- *
- * Return: 0 on success.
- */
-static int visornic_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
- visornic_serverdown(devdata, complete_func);
- return 0;
-}
-
-/* visornic_resume - called when IO Partition has recovered
- * @dev: Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO partition has recovered. Re-establish connection to the IO
- * Partition and set the link up. Okay to do DMA again.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visornic_devdata *devdata;
- struct net_device *netdev;
- unsigned long flags;
-
- devdata = dev_get_drvdata(&dev->device);
- if (!devdata) {
- dev_err(&dev->device, "%s no devdata\n", __func__);
- return -EINVAL;
- }
-
- netdev = devdata->netdev;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->server_change_state) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s server already changing state\n",
- __func__);
- return -EINVAL;
- }
- if (!devdata->server_down) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s server not down\n", __func__);
- complete_func(dev, 0);
- return 0;
- }
- devdata->server_change_state = true;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* Must transition channel to ATTACHED state BEFORE
- * we can start using the device again.
- * TODO: State transitions
- */
- visorbus_enable_channel_interrupts(dev);
-
- rtnl_lock();
- dev_open(netdev, NULL);
- rtnl_unlock();
-
- complete_func(dev, 0);
- return 0;
-}
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visornic_driver = {
- .name = "visornic",
- .owner = THIS_MODULE,
- .channel_types = visornic_channel_types,
- .probe = visornic_probe,
- .remove = visornic_remove,
- .pause = visornic_pause,
- .resume = visornic_resume,
- .channel_interrupt = visornic_channel_interrupt,
-};
-
-/* visornic_init - init function
- *
- * Init function for the visornic driver. Do initial driver setup and wait
- * for devices.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_init(void)
-{
- int err;
-
- visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
-
- debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
- &debugfs_info_fops);
- debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir, NULL,
- &debugfs_enable_ints_fops);
-
- err = visorbus_register_visor_driver(&visornic_driver);
- if (err)
- debugfs_remove_recursive(visornic_debugfs_dir);
-
- return err;
-}
-
-/* visornic_cleanup - driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visornic_cleanup(void)
-{
- visorbus_unregister_visor_driver(&visornic_driver);
- debugfs_remove_recursive(visornic_debugfs_dir);
-}
-
-module_init(visornic_init);
-module_exit(visornic_cleanup);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index cb7c82403dbf..31e58c9d1a11 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -13,6 +13,7 @@ if BCM_VIDEOCORE
config BCM2835_VCHIQ
tristate "BCM2835 VCHIQ"
+ depends on HAS_DMA
imply VCHIQ_CDEV
help
Broadcom BCM2835 and similar SoCs have a VPU called VideoCore. This config
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index d32ea348e846..7f22f6c85067 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -3,7 +3,9 @@ config SND_BCM2835
tristate "BCM2835 Audio"
depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
select SND_PCM
- select BCM2835_VCHIQ
+ select BCM2835_VCHIQ if HAS_DMA
help
- Say Y or M if you want to support BCM2835 built in audio
-
+ Say Y or M if you want to support BCM2835 built in audio.
+ This driver handles both 3.5mm and HDMI audio, by leveraging
+ the VCHIQ messaging interface between the kernel and the firmware
+ running on VideoCore.
\ No newline at end of file
diff --git a/drivers/staging/vc04_services/bcm2835-audio/TODO b/drivers/staging/vc04_services/bcm2835-audio/TODO
deleted file mode 100644
index b85451255db0..000000000000
--- a/drivers/staging/vc04_services/bcm2835-audio/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-*****************************************************************************
-* *
-* TODO: BCM2835-AUDIO *
-* *
-*****************************************************************************
-
-1) Revisit multi-cards options and PCM route mixer control (as per comment
-https://lore.kernel.org/lkml/s5hd0to5598.wl-tiwai@suse.de)
-
-2) Fix the remaining checkpatch.pl errors and warnings.
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index 3703409715da..1c1f040122d7 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -117,15 +117,6 @@ static const struct snd_kcontrol_new snd_bcm2835_ctl[] = {
.get = snd_bcm2835_ctl_get,
.put = snd_bcm2835_ctl_put,
},
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Route",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_DEVICE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- },
};
static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
@@ -220,7 +211,14 @@ static int create_ctls(struct bcm2835_chip *chip, size_t size,
return 0;
}
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
+int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
+{
+ strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+ return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl),
+ snd_bcm2835_ctl);
+}
+
+int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
{
int err;
@@ -232,71 +230,3 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
snd_bcm2835_spdif);
}
-static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Headphone Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .private_value = PCM_PLAYBACK_VOLUME,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- .tlv = {.p = snd_bcm2835_db_scale}
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Headphone Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_MUTE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- }
-};
-
-int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
-{
- strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
- return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl),
- snd_bcm2835_headphones_ctl);
-}
-
-static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "HDMI Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .private_value = PCM_PLAYBACK_VOLUME,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- .tlv = {.p = snd_bcm2835_db_scale}
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "HDMI Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_MUTE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- }
-};
-
-int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
-{
- strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
- return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi),
- snd_bcm2835_hdmi);
-}
-
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index f2ef1d641e70..68e8d491a7ec 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -82,8 +82,7 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream,
}
/* open callback */
-static int snd_bcm2835_playback_open_generic(
- struct snd_pcm_substream *substream, int spdif)
+static int snd_bcm2835_playback_open_generic(struct snd_pcm_substream *substream, int spdif)
{
struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -237,7 +236,7 @@ static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- void *src = (void *) (substream->runtime->dma_area + rec->sw_data);
+ void *src = (void *)(substream->runtime->dma_area + rec->sw_data);
bcm2835_audio_write(alsa_stream, bytes, src);
}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index d567a2e3f70c..e429b33b4d39 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -11,7 +11,7 @@ struct bcm2835_audio_instance {
struct device *dev;
unsigned int service_handle;
struct completion msg_avail_comp;
- struct mutex vchi_mutex;
+ struct mutex vchi_mutex; /* Serialize vchiq access */
struct bcm2835_alsa_stream *alsa_stream;
int result;
unsigned int max_packet;
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index 628732d7bf6a..00bc898b0189 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -10,17 +10,13 @@
#include "bcm2835.h"
static bool enable_hdmi;
-static bool enable_headphones;
-static bool enable_compat_alsa = true;
+static bool enable_headphones = true;
static int num_channels = MAX_SUBSTREAMS;
module_param(enable_hdmi, bool, 0444);
MODULE_PARM_DESC(enable_hdmi, "Enables HDMI virtual audio device");
module_param(enable_headphones, bool, 0444);
MODULE_PARM_DESC(enable_headphones, "Enables Headphones virtual audio device");
-module_param(enable_compat_alsa, bool, 0444);
-MODULE_PARM_DESC(enable_compat_alsa,
- "Enables ALSA compatibility virtual audio device");
module_param(num_channels, int, 0644);
MODULE_PARM_DESC(num_channels, "Number of audio channels (default: 8)");
@@ -63,19 +59,20 @@ struct bcm2835_audio_driver {
enum snd_bcm2835_route route;
};
-static int bcm2835_audio_alsa_newpcm(struct bcm2835_chip *chip,
+static int bcm2835_audio_dual_newpcm(struct bcm2835_chip *chip,
const char *name,
enum snd_bcm2835_route route,
u32 numchannels)
{
int err;
- err = snd_bcm2835_new_pcm(chip, "bcm2835 ALSA", 0, AUDIO_DEST_AUTO,
- numchannels - 1, false);
+ err = snd_bcm2835_new_pcm(chip, name, 0, route,
+ numchannels, false);
+
if (err)
return err;
- err = snd_bcm2835_new_pcm(chip, "bcm2835 IEC958/HDMI", 1, 0, 1, true);
+ err = snd_bcm2835_new_pcm(chip, "IEC958", 1, route, 1, true);
if (err)
return err;
@@ -90,18 +87,6 @@ static int bcm2835_audio_simple_newpcm(struct bcm2835_chip *chip,
return snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false);
}
-static struct bcm2835_audio_driver bcm2835_audio_alsa = {
- .driver = {
- .name = "bcm2835_alsa",
- .owner = THIS_MODULE,
- },
- .shortname = "bcm2835 ALSA",
- .longname = "bcm2835 ALSA",
- .minchannels = 2,
- .newpcm = bcm2835_audio_alsa_newpcm,
- .newctl = snd_bcm2835_new_ctl,
-};
-
static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
.driver = {
.name = "bcm2835_hdmi",
@@ -110,7 +95,7 @@ static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
.shortname = "bcm2835 HDMI",
.longname = "bcm2835 HDMI",
.minchannels = 1,
- .newpcm = bcm2835_audio_simple_newpcm,
+ .newpcm = bcm2835_audio_dual_newpcm,
.newctl = snd_bcm2835_new_hdmi_ctl,
.route = AUDIO_DEST_HDMI
};
@@ -135,10 +120,6 @@ struct bcm2835_audio_drivers {
static struct bcm2835_audio_drivers children_devices[] = {
{
- .audio_driver = &bcm2835_audio_alsa,
- .is_enabled = &enable_compat_alsa,
- },
- {
.audio_driver = &bcm2835_audio_hdmi,
.is_enabled = &enable_hdmi,
},
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 51066ac8eea5..38b7451d77b2 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -61,7 +61,7 @@ struct bcm2835_chip {
unsigned int opened;
unsigned int spdif_status;
- struct mutex audio_mutex;
+ struct mutex audio_mutex; /* Serialize chip data access */
struct bcm2835_vchi_ctx *vchi_ctx;
};
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Kconfig b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
index dcda565f9b38..870c9afb223a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
@@ -3,8 +3,8 @@ config VIDEO_BCM2835
tristate "BCM2835 Camera"
depends on MEDIA_SUPPORT
depends on VIDEO_DEV && (ARCH_BCM2835 || COMPILE_TEST)
- select BCM2835_VCHIQ
- select BCM2835_VCHIQ_MMAL
+ select BCM2835_VCHIQ if HAS_DMA
+ select BCM2835_VCHIQ_MMAL if HAS_DMA
select VIDEOBUF2_VMALLOC
select BTREE
help
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 88b1878854e0..fd456d1f7061 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1033,9 +1033,9 @@ static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
preview_port->es.video.crop.y = 0;
preview_port->es.video.crop.width = f->fmt.pix.width;
preview_port->es.video.crop.height = f->fmt.pix.height;
- preview_port->es.video.frame_rate.num =
+ preview_port->es.video.frame_rate.numerator =
dev->capture.timeperframe.denominator;
- preview_port->es.video.frame_rate.den =
+ preview_port->es.video.frame_rate.denominator =
dev->capture.timeperframe.numerator;
ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
@@ -1084,9 +1084,9 @@ static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
port->es.video.crop.y = 0;
port->es.video.crop.width = f->fmt.pix.width;
port->es.video.crop.height = f->fmt.pix.height;
- port->es.video.frame_rate.num =
+ port->es.video.frame_rate.numerator =
dev->capture.timeperframe.denominator;
- port->es.video.frame_rate.den =
+ port->es.video.frame_rate.denominator =
dev->capture.timeperframe.numerator;
port->format.encoding = mfmt->mmal;
@@ -1225,8 +1225,8 @@ static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
camera_port->es.video.crop.y = 0;
camera_port->es.video.crop.width = f->fmt.pix.width;
camera_port->es.video.crop.height = f->fmt.pix.height;
- camera_port->es.video.frame_rate.num = 0;
- camera_port->es.video.frame_rate.den = 1;
+ camera_port->es.video.frame_rate.numerator = 0;
+ camera_port->es.video.frame_rate.denominator = 1;
camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
@@ -1629,8 +1629,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_VIDEO].format;
@@ -1643,8 +1643,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_CAPTURE].format;
@@ -1656,8 +1656,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 2592;
format->es->video.crop.height = 1944;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
dev->capture.width = format->es->video.width;
dev->capture.height = format->es->video.height;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index eb722f16fb91..5644d1d457b9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -154,13 +154,13 @@ static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
- struct mmal_parameter_rational rational_value;
+ struct s32_fract rational_value;
struct vchiq_mmal_port *control;
control = &dev->component[COMP_CAMERA]->control;
- rational_value.num = ctrl->val;
- rational_value.den = 100;
+ rational_value.numerator = ctrl->val;
+ rational_value.denominator = 100;
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
@@ -489,9 +489,10 @@ static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
dev->blue_gain = ctrl->val;
- gains.r_gain.num = dev->red_gain;
- gains.b_gain.num = dev->blue_gain;
- gains.r_gain.den = gains.b_gain.den = 1000;
+ gains.r_gain.numerator = dev->red_gain;
+ gains.r_gain.denominator = 1000;
+ gains.b_gain.numerator = dev->blue_gain;
+ gains.b_gain.denominator = 1000;
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
@@ -1271,26 +1272,26 @@ int set_framerate_params(struct bcm2835_mmal_dev *dev)
struct mmal_parameter_fps_range fps_range;
int ret;
- fps_range.fps_high.num = dev->capture.timeperframe.denominator;
- fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+ fps_range.fps_high.numerator = dev->capture.timeperframe.denominator;
+ fps_range.fps_high.denominator = dev->capture.timeperframe.numerator;
if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
(dev->exp_auto_priority)) {
/* Variable FPS. Define min FPS as 1fps. */
- fps_range.fps_low.num = 1;
- fps_range.fps_low.den = 1;
+ fps_range.fps_low.numerator = 1;
+ fps_range.fps_low.denominator = 1;
} else {
/* Fixed FPS - set min and max to be the same */
- fps_range.fps_low.num = fps_range.fps_high.num;
- fps_range.fps_low.den = fps_range.fps_high.den;
+ fps_range.fps_low.numerator = fps_range.fps_high.numerator;
+ fps_range.fps_low.denominator = fps_range.fps_high.denominator;
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Set fps range to %d/%d to %d/%d\n",
- fps_range.fps_low.num,
- fps_range.fps_low.den,
- fps_range.fps_high.num,
- fps_range.fps_high.den);
+ fps_range.fps_low.numerator,
+ fps_range.fps_low.denominator,
+ fps_range.fps_high.numerator,
+ fps_range.fps_high.denominator);
ret = vchiq_mmal_port_parameter_set(dev->instance,
&dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW],
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index f0bfacfdea80..0596ac61e286 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -431,21 +431,18 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
if (head_bytes > actual)
head_bytes = actual;
- memcpy((char *)kmap(pages[0]) +
+ memcpy_to_page(pages[0],
pagelist->offset,
fragments,
head_bytes);
- kunmap(pages[0]);
}
if ((actual >= 0) && (head_bytes < actual) &&
- (tail_bytes != 0)) {
- memcpy((char *)kmap(pages[num_pages - 1]) +
- ((pagelist->offset + actual) &
- (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
+ (tail_bytes != 0))
+ memcpy_to_page(pages[num_pages - 1],
+ (pagelist->offset + actual) &
+ (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
fragments + g_cache_line_size,
tail_bytes);
- kunmap(pages[num_pages - 1]);
- }
down(&g_free_fragments_mutex);
*(char **)fragments = g_free_fragments;
@@ -918,8 +915,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
struct vchiq_instance *instance;
struct vchiq_service *service;
enum vchiq_status status;
- struct bulk_waiter_node *waiter = NULL;
- bool found = false;
+ struct bulk_waiter_node *waiter = NULL, *iter;
service = find_service_by_handle(handle);
if (!service)
@@ -930,16 +926,16 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
vchiq_service_put(service);
mutex_lock(&instance->bulk_waiter_list_mutex);
- list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
- if (waiter->pid == current->pid) {
- list_del(&waiter->list);
- found = true;
+ list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
break;
}
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (found) {
+ if (waiter) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
if (bulk) {
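The bulk-waiter lookup above switches from a found flag to a dedicated list cursor: once list_for_each_entry() runs to completion without a break, the cursor no longer points at a valid entry, so only the pointer assigned inside the loop may be used afterwards. A self-contained sketch of the pattern with simplified types:

#include <linux/list.h>
#include <linux/types.h>

struct waiter {
	struct list_head list;
	pid_t pid;
};

/* Keep the loop cursor ("iter") separate from the result pointer,
 * which is only set when a real match is found.
 */
static struct waiter *find_and_remove_waiter(struct list_head *head, pid_t pid)
{
	struct waiter *found = NULL, *iter;

	list_for_each_entry(iter, head, list) {
		if (iter->pid == pid) {
			list_del(&iter->list);
			found = iter;
			break;
		}
	}
	return found;
}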
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 82b7bd7b54b2..1ddc661642a9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -79,7 +79,6 @@
#define BITSET_BIT(b) (1 << (b & 31))
#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
-#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
enum {
DEBUG_ENTRIES,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index b41c2a267355..66bbfec332ba 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -289,8 +289,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
enum vchiq_bulk_mode __user *mode)
{
struct vchiq_service *service;
- struct bulk_waiter_node *waiter = NULL;
- bool found = false;
+ struct bulk_waiter_node *waiter = NULL, *iter;
void *userdata;
int status = 0;
int ret;
@@ -309,16 +308,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
userdata = &waiter->bulk_waiter;
} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
mutex_lock(&instance->bulk_waiter_list_mutex);
- list_for_each_entry(waiter, &instance->bulk_waiter_list,
+ list_for_each_entry(iter, &instance->bulk_waiter_list,
list) {
- if (waiter->pid == current->pid) {
- list_del(&waiter->list);
- found = true;
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
break;
}
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (!found) {
+ if (!waiter) {
vchiq_log_error(vchiq_arm_log_level,
"no bulk_waiter found for pid %d", current->pid);
ret = -ESRCH;
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
index d77e15f25dda..492d4c5dca08 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
@@ -14,6 +14,8 @@
#ifndef MMAL_MSG_COMMON_H
#define MMAL_MSG_COMMON_H
+#include <linux/types.h>
+
enum mmal_msg_status {
MMAL_MSG_STATUS_SUCCESS = 0, /**< Success */
MMAL_MSG_STATUS_ENOMEM, /**< Out of memory */
@@ -40,9 +42,4 @@ struct mmal_rect {
s32 height; /**< height */
};
-struct mmal_rational {
- s32 num; /**< Numerator */
- s32 den; /**< Denominator */
-};
-
#endif /* MMAL_MSG_COMMON_H */
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
index 1e996d8cd283..5569876d8c7d 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
@@ -14,6 +14,8 @@
#ifndef MMAL_MSG_FORMAT_H
#define MMAL_MSG_FORMAT_H
+#include <linux/math.h>
+
#include "mmal-msg-common.h"
/* MMAL_ES_FORMAT_T */
@@ -30,8 +32,8 @@ struct mmal_video_format {
u32 width; /* Width of frame in pixels */
u32 height; /* Height of frame in rows of pixels */
struct mmal_rect crop; /* Visible region of the frame */
- struct mmal_rational frame_rate; /* Frame rate */
- struct mmal_rational par; /* Pixel aspect ratio */
+ struct s32_fract frame_rate; /* Frame rate */
+ struct s32_fract par; /* Pixel aspect ratio */
/*
* FourCC specifying the color space of the video stream. See the
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
index 2277e05b1e31..a0cdd28101f2 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
@@ -22,6 +22,8 @@
#ifndef MMAL_PARAMETERS_H
#define MMAL_PARAMETERS_H
+#include <linux/math.h>
+
/** Common parameter ID group, used with many types of component. */
#define MMAL_PARAMETER_GROUP_COMMON (0 << 16)
/** Camera-specific parameter ID group. */
@@ -223,11 +225,6 @@ enum mmal_parameter_camera_type {
MMAL_PARAMETER_CUSTOM_AWB_GAINS,
};
-struct mmal_parameter_rational {
- s32 num; /**< Numerator */
- s32 den; /**< Denominator */
-};
-
enum mmal_parameter_camera_config_timestamp_mode {
MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value
@@ -243,9 +240,9 @@ enum mmal_parameter_camera_config_timestamp_mode {
struct mmal_parameter_fps_range {
/**< Low end of the permitted framerate range */
- struct mmal_parameter_rational fps_low;
+ struct s32_fract fps_low;
/**< High end of the permitted framerate range */
- struct mmal_parameter_rational fps_high;
+ struct s32_fract fps_high;
};
/* camera configuration parameter */
@@ -350,8 +347,8 @@ enum MMAL_PARAM_FLICKERAVOID {
};
struct mmal_parameter_awbgains {
- struct mmal_parameter_rational r_gain; /**< Red gain */
- struct mmal_parameter_rational b_gain; /**< Blue gain */
+ struct s32_fract r_gain; /**< Red gain */
+ struct s32_fract b_gain; /**< Blue gain */
};
/** Manner of video rate control */
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 70c9d5544b56..845b20e4d05a 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -744,9 +744,9 @@ static void dump_port_info(struct vchiq_mmal_port *port)
port->es.video.crop.y,
port->es.video.crop.width, port->es.video.crop.height);
pr_debug(" : framerate %d/%d aspect %d/%d\n",
- port->es.video.frame_rate.num,
- port->es.video.frame_rate.den,
- port->es.video.par.num, port->es.video.par.den);
+ port->es.video.frame_rate.numerator,
+ port->es.video.frame_rate.denominator,
+ port->es.video.par.numerator, port->es.video.par.denominator);
}
}
@@ -1549,8 +1549,8 @@ int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
dst->es.video.crop.y = src->es.video.crop.y;
dst->es.video.crop.width = src->es.video.crop.width;
dst->es.video.crop.height = src->es.video.crop.height;
- dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
- dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
+ dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
+ dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;
/* set new format */
ret = port_info_set(instance, dst);
@@ -1841,7 +1841,6 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex);
vchiq_shutdown(instance->vchiq_instance);
- flush_workqueue(instance->bulk_wq);
destroy_workqueue(instance->bulk_wq);
idr_destroy(&instance->context_map);
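The removed flush_workqueue() call above is redundant because destroy_workqueue() already drains any work still queued before freeing the queue. A teardown sketch (names illustrative only):

#include <linux/idr.h>
#include <linux/workqueue.h>

/* Teardown sketch: destroy_workqueue() drains outstanding work itself,
 * so an explicit flush_workqueue() right before it is not needed.
 */
static void example_teardown(struct workqueue_struct *wq, struct idr *map)
{
	destroy_workqueue(wq);	/* implies a final drain of queued work */
	idr_destroy(map);
}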
diff --git a/drivers/staging/vme/Makefile b/drivers/staging/vme/Makefile
deleted file mode 100644
index cf2f686ccffe..000000000000
--- a/drivers/staging/vme/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-y += devices/
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme_user/Kconfig
index 5651bb16b28b..e8b4461bf27f 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme_user/Kconfig
@@ -3,7 +3,7 @@ comment "VME Device Drivers"
config VME_USER
tristate "VME user space access driver"
- depends on STAGING
+ depends on STAGING && VME_BUS
help
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme_user/Makefile
index 5380115139b0..5380115139b0 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme_user/Makefile
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme_user/vme_user.c
index e3fa38bd7f12..859af797630c 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme_user/vme_user.c
@@ -773,7 +773,7 @@ MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);
MODULE_DESCRIPTION("VME User Space Access Driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");
module_init(vme_user_init);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme_user/vme_user.h
index 19ecb05781cc..19ecb05781cc 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme_user/vme_user.h
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index dfdb0ebf43b5..577a38fae369 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -29,7 +29,6 @@
*
*/
-#include "tmacro.h"
#include "mac.h"
#include "baseband.h"
#include "srom.h"
@@ -1910,19 +1909,19 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+ iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
/* turn on REGR */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ by_value = ioread8(iobase + MAC_REG_BBREGCTL);
if (by_value & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(iobase + MAC_REG_BBREGDATA, pby_data);
+ *pby_data = ioread8(iobase + MAC_REG_BBREGDATA);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1953,15 +1952,15 @@ bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+ iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
/* set BB data */
- VNSvOutPortB(iobase + MAC_REG_BBREGDATA, by_data);
+ iowrite8(by_data, iobase + MAC_REG_BBREGDATA);
/* turn on BBREGCTL_REGW */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ by_value = ioread8(iobase + MAC_REG_BBREGCTL);
if (by_value & BBREGCTL_DONE)
break;
}
@@ -2054,7 +2053,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
- VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+ iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
priv->abyBBVGA[0] = 0x14;
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 1110366fc415..2cde0082fc03 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -11,7 +11,7 @@
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
* CARDqGetTSFOffset - Calculate TSFOffset
- * CARDbGetCurrentTSF - Read Current NIC TSF counter
+ * vt6655_get_current_tsf - Read Current NIC TSF counter
* CARDqGetNextTBTT - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
@@ -24,7 +24,6 @@
*
*/
-#include "tmacro.h"
#include "card.h"
#include "baseband.h"
#include "mac.h"
@@ -239,26 +238,25 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
if (priv->bySIFS != bySIFS) {
priv->bySIFS = bySIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_SIFS, priv->bySIFS);
+ iowrite8(priv->bySIFS, priv->port_offset + MAC_REG_SIFS);
}
if (priv->byDIFS != byDIFS) {
priv->byDIFS = byDIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_DIFS, priv->byDIFS);
+ iowrite8(priv->byDIFS, priv->port_offset + MAC_REG_DIFS);
}
if (priv->byEIFS != C_EIFS) {
priv->byEIFS = C_EIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_EIFS, priv->byEIFS);
+ iowrite8(priv->byEIFS, priv->port_offset + MAC_REG_EIFS);
}
if (priv->bySlot != bySlot) {
priv->bySlot = bySlot;
- VNSvOutPortB(priv->port_offset + MAC_REG_SLOT, priv->bySlot);
+ iowrite8(priv->bySlot, priv->port_offset + MAC_REG_SLOT);
bb_set_short_slot_time(priv);
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->port_offset + MAC_REG_CWMAXMIN0,
- priv->byCWMaxMin);
+ iowrite8(priv->byCWMaxMin, priv->port_offset + MAC_REG_CWMAXMIN0);
}
priv->byPacketType = CARDbyGetPktType(priv);
@@ -289,7 +287,7 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 local_tsf;
u64 qwTSFOffset = 0;
- CARDbGetCurrentTSF(priv, &local_tsf);
+ local_tsf = vt6655_get_current_tsf(priv);
if (qwBSSTimestamp != local_tsf) {
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
@@ -321,9 +319,9 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
bool CARDbSetBeaconPeriod(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
- u64 qwNextTBTT = 0;
+ u64 qwNextTBTT;
- CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+ qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
@@ -740,24 +738,24 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
*
* Return Value: true if success; otherwise false
*/
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
+u64 vt6655_get_current_tsf(struct vnt_private *priv)
{
void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned char data;
+ u32 low, high;
MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_TFTCTL, &data);
+ data = ioread8(iobase + MAC_REG_TFTCTL);
if (!(data & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
- return false;
- VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
- VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
-
- return true;
+ return 0;
+ low = ioread32(iobase + MAC_REG_TSFCNTR);
+ high = ioread32(iobase + MAC_REG_TSFCNTR + 4);
+ return low | ((u64)high << 32);
}
/*
@@ -804,9 +802,9 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
void __iomem *iobase = priv->port_offset;
- u64 qwNextTBTT = 0;
+ u64 qwNextTBTT;
- CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+ qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* Set NextTBTT */
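vt6655_get_current_tsf() now returns the 64-bit TSF directly (0 on timeout) instead of reporting success through a bool and an output parameter. The two halves come from plain ioread32(), which already returns CPU byte order, so no extra byte swapping is needed when combining them. A self-contained sketch of the combine step:

#include <linux/io.h>

/*
 * Sketch of merging two 32-bit MMIO reads into one 64-bit counter, as
 * vt6655_get_current_tsf() does after the latch/poll sequence.
 */
static u64 read_counter64(void __iomem *reg)
{
	u32 low  = ioread32(reg);
	u32 high = ioread32(reg + 4);

	return low | ((u64)high << 32);
}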
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 09e7f3f1cbed..22dc359a6565 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -46,7 +46,7 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF);
+u64 vt6655_get_current_tsf(struct vnt_private *priv);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
unsigned char CARDbyGetPktType(struct vnt_private *priv);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index abe867814dc8..652dcaf61169 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -118,11 +118,9 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
/* set HW default power register */
MACvSelectPage1(priv->port_offset);
RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
- VNSvOutPortB(priv->port_offset + MAC_REG_PWRCCK,
- priv->byCurPwr);
+ iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
- VNSvOutPortB(priv->port_offset + MAC_REG_PWROFDM,
- priv->byCurPwr);
+ iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
MACvSelectPage0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 897d70cf32b8..afaf331fe125 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -219,7 +219,7 @@ static void device_init_registers(struct vnt_private *priv)
MACvInitialize(priv);
/* Get Local ID */
- VNSvInPortB(priv->port_offset + MAC_REG_LOCALID, &priv->local_id);
+ priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);
spin_lock_irqsave(&priv->lock, flags);
@@ -334,8 +334,7 @@ static void device_init_registers(struct vnt_private *priv)
if (priv->local_id > REV_ID_VT3253_B1) {
MACvSelectPage1(priv->port_offset);
- VNSvOutPortB(priv->port_offset + MAC_REG_MSRCTL + 1,
- (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
+ iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
MACvSelectPage0(priv->port_offset);
}
@@ -349,9 +348,9 @@ static void device_init_registers(struct vnt_private *priv)
MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
/* enable TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
/* initialize BBP registers */
bb_vt3253_init(priv);
@@ -377,7 +376,7 @@ static void device_init_registers(struct vnt_private *priv)
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
/* Get GPIO */
- MACvGPIOIn(priv->port_offset, &priv->byGPIO);
+ priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);
if (((priv->byGPIO & GPIO0_DATA) &&
!(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
@@ -406,7 +405,7 @@ static void device_init_registers(struct vnt_private *priv)
MACvReceive1(priv->port_offset);
/* start the adapter */
- MACvStart(priv->port_offset);
+ iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
}
static void device_print_info(struct vnt_private *priv)
@@ -1029,7 +1028,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
u32 isr;
unsigned long flags;
- MACvReadISR(priv->port_offset, &isr);
+ isr = ioread32(priv->port_offset + MAC_REG_ISR);
if (isr == 0)
return;
@@ -1042,7 +1041,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
- MACvReadMIBCounter(priv->port_offset, &mib_counter);
+ mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
@@ -1060,7 +1059,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
if (isr & ISR_FETALERR) {
pr_debug(" ISR_FETALERR\n");
- VNSvOutPortB(priv->port_offset + MAC_REG_SOFTPWRCTL, 0);
+ iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
VNSvOutPortW(priv->port_offset +
MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
device_error(priv, isr);
@@ -1116,7 +1115,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
ieee80211_queue_stopped(priv->hw, 0))
ieee80211_wake_queues(priv->hw);
- MACvReadISR(priv->port_offset, &isr);
+ isr = ioread32(priv->port_offset + MAC_REG_ISR);
MACvReceive0(priv->port_offset);
MACvReceive1(priv->port_offset);
@@ -1407,7 +1406,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
- MACvWriteBSSIDAddress(priv->port_offset, (u8 *)conf->bssid);
+ MACvWriteBSSIDAddress(priv->port_offset, conf->bssid);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1477,10 +1476,8 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
} else {
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
- TFTCTL_TSFCNTRST);
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
- TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
}
}
}
@@ -1513,7 +1510,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
- VNSvInPortB(priv->port_offset + MAC_REG_RCR, &rx_mode);
+ rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);
dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
@@ -1561,7 +1558,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
rx_mode |= RCR_BSSID;
}
- VNSvOutPortB(priv->port_offset + MAC_REG_RCR, rx_mode);
+ iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);
dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}
@@ -1603,7 +1600,7 @@ static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct vnt_private *priv = hw->priv;
u64 tsf;
- CARDbGetCurrentTSF(priv, &tsf);
+ tsf = vt6655_get_current_tsf(priv);
return tsf;
}
@@ -1621,7 +1618,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct vnt_private *priv = hw->priv;
/* reset TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
}
static const struct ieee80211_ops vnt_mac_ops = {
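The VNSvInPortB()/VNSvOutPortB() wrappers removed throughout this series were thin aliases for the generic MMIO accessors (see the upc.h hunk further down), so each call site converts mechanically. A sketch of a typical read-modify-write after the conversion; the register offset is illustrative only:

#include <linux/io.h>

static void example_rmw(void __iomem *iobase)
{
	u8 rx_mode;

	/* old: VNSvInPortB(iobase + 0x06, &rx_mode); */
	rx_mode = ioread8(iobase + 0x06);

	/* old: VNSvOutPortB(iobase + 0x06, rx_mode | 0x01); */
	iowrite8(rx_mode | 0x01, iobase + 0x06);
}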
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index f843966a3ea4..1469015eb5b4 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -11,7 +11,6 @@
*
*/
-#include "tmacro.h"
#include "key.h"
#include "mac.h"
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 80cced7dfda8..88ddd0676463 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -36,7 +36,6 @@
*
*/
-#include "tmacro.h"
#include "mac.h"
/*
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 550dc4da80a9..57ae3bdbdb2d 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -18,7 +18,6 @@
#ifndef __MAC_H__
#define __MAC_H__
-#include "tmacro.h"
#include "upc.h"
/*--------------------- Export Definitions -------------------------*/
@@ -261,18 +260,18 @@
#define TFTCTL_TSFCNTREN 0x01
/* Bits in the EnhanceCFG register */
-#define EnCFG_BarkerPream 0x00020000
-#define EnCFG_NXTBTTCFPSTR 0x00010000
-#define EnCFG_BcnSusClr 0x00000200
-#define EnCFG_BcnSusInd 0x00000100
-#define EnCFG_CFP_ProtectEn 0x00000040
-#define EnCFG_ProtectMd 0x00000020
-#define EnCFG_HwParCFP 0x00000010
-#define EnCFG_CFNULRSP 0x00000004
-#define EnCFG_BBType_MASK 0x00000003
-#define EnCFG_BBType_g 0x00000002
-#define EnCFG_BBType_b 0x00000001
-#define EnCFG_BBType_a 0x00000000
+#define ENCFG_BARKERPREAM 0x00020000
+#define ENCFG_NXTBTTCFPSTR 0x00010000
+#define ENCFG_BCNSUSCLR 0x00000200
+#define ENCFG_BCNSUSIND 0x00000100
+#define ENCFG_CFP_PROTECTEN 0x00000040
+#define ENCFG_PROTECTMD 0x00000020
+#define ENCFG_HWPARCFP 0x00000010
+#define ENCFG_CFNULRSP 0x00000004
+#define ENCFG_BBTYPE_MASK 0x00000003
+#define ENCFG_BBTYPE_G 0x00000002
+#define ENCFG_BBTYPE_B 0x00000001
+#define ENCFG_BBTYPE_A 0x00000000
/* Bits in the Page1Sel register */
#define PAGE1_SEL 0x01
@@ -497,7 +496,7 @@
#define MAC_LB_INTERNAL 0x01
#define MAC_LB_NONE 0x00
-#define Default_BI 0x200
+#define DEFAULT_BI 0x200
/* MiscFIFO Offset */
#define MISCFIFO_KEYETRY0 32
@@ -541,77 +540,31 @@
#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
+ byData = ioread8(iobase + byRegOfs); \
+ iowrite8(byData | (byBits), iobase + byRegOfs); \
} while (0)
#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(iobase + byRegOfs, &wData); \
+ wData = ioread16(iobase + byRegOfs); \
VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
} while (0)
-#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + byRegOfs, &dwData); \
- VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \
-} while (0)
-
-#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \
-do { \
- unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- byData &= byMask; \
- VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
-} while (0)
-
#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \
+ byData = ioread8(iobase + byRegOfs); \
+ iowrite8(byData & ~(byBits), iobase + byRegOfs); \
} while (0)
#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(iobase + byRegOfs, &wData); \
+ wData = ioread16(iobase + byRegOfs); \
VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
} while (0)
-#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + byRegOfs, &dwData); \
- VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \
-} while (0)
-
-#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
/* set the chip with current BCN tx descriptor address */
#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
@@ -622,104 +575,40 @@ do { \
VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2, \
wCurrBCNLength)
-#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(iobase + MAC_REG_BSSID0, \
- (unsigned char *)pbyEtherAddr); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \
- pbyEtherAddr + 1); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \
- pbyEtherAddr + 2); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \
- pbyEtherAddr + 3); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \
- pbyEtherAddr + 4); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \
- pbyEtherAddr + 5); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0, \
- *(pbyEtherAddr)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \
- *(pbyEtherAddr + 1)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \
- *(pbyEtherAddr + 2)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \
- *(pbyEtherAddr + 3)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \
- *(pbyEtherAddr + 4)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \
- *(pbyEtherAddr + 5)); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
+ iowrite8(pbyEtherAddr[0], iobase + MAC_REG_BSSID0); \
+ iowrite8(pbyEtherAddr[1], iobase + MAC_REG_BSSID0 + 1); \
+ iowrite8(pbyEtherAddr[2], iobase + MAC_REG_BSSID0 + 2); \
+ iowrite8(pbyEtherAddr[3], iobase + MAC_REG_BSSID0 + 3); \
+ iowrite8(pbyEtherAddr[4], iobase + MAC_REG_BSSID0 + 4); \
+ iowrite8(pbyEtherAddr[5], iobase + MAC_REG_BSSID0 + 5); \
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
} while (0)
#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(iobase + MAC_REG_PAR0, \
- (unsigned char *)pbyEtherAddr); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \
- pbyEtherAddr + 1); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \
- pbyEtherAddr + 2); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \
- pbyEtherAddr + 3); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \
- pbyEtherAddr + 4); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \
- pbyEtherAddr + 5); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
+ pbyEtherAddr[0] = ioread8(iobase + MAC_REG_PAR0); \
+ pbyEtherAddr[1] = ioread8(iobase + MAC_REG_PAR0 + 1); \
+ pbyEtherAddr[2] = ioread8(iobase + MAC_REG_PAR0 + 2); \
+ pbyEtherAddr[3] = ioread8(iobase + MAC_REG_PAR0 + 3); \
+ pbyEtherAddr[4] = ioread8(iobase + MAC_REG_PAR0 + 4); \
+ pbyEtherAddr[5] = ioread8(iobase + MAC_REG_PAR0 + 5); \
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
} while (0)
-#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(iobase + MAC_REG_PAR0, \
- *pbyEtherAddr); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \
- *(pbyEtherAddr + 1)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \
- *(pbyEtherAddr + 2)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \
- *(pbyEtherAddr + 3)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \
- *(pbyEtherAddr + 4)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \
- *(pbyEtherAddr + 5)); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
-#define MACvClearISR(iobase) \
- VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
-
-#define MACvStart(iobase) \
- VNSvOutPortB(iobase + MAC_REG_HOSTCR, \
- (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
-
#define MACvRx0PerPktMode(iobase) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-#define MACvRx0BufferFillMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
-
#define MACvRx1PerPktMode(iobase) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-#define MACvRx1BufferFillMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
-
-#define MACvRxOn(iobase) \
- MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
-
#define MACvReceive0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
else \
@@ -729,20 +618,17 @@ do { \
#define MACvReceive1(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
else \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
} while (0)
-#define MACvTxOn(iobase) \
- MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
-
#define MACvTransmit0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
else \
@@ -752,47 +638,21 @@ do { \
#define MACvTransmitAC0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
else \
VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitSYNC(iobase) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \
- if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
- else \
- VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitATIM(iobase) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \
- if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
- else \
- VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitBCN(iobase) \
- VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
-
#define MACvClearStckDS(iobase) \
do { \
unsigned char byOrgValue; \
- VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \
+ byOrgValue = ioread8(iobase + MAC_REG_STICKHW); \
byOrgValue = byOrgValue & 0xFC; \
- VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \
+ iowrite8(byOrgValue, iobase + MAC_REG_STICKHW); \
} while (0)
-#define MACvReadISR(iobase, pdwValue) \
- VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
-
#define MACvWriteISR(iobase, dwValue) \
VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
@@ -803,77 +663,58 @@ do { \
VNSvOutPortD(iobase + MAC_REG_IMR, 0)
#define MACvSelectPage0(iobase) \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL)
#define MACvSelectPage1(iobase) \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
-
-#define MACvReadMIBCounter(iobase, pdwCounter) \
- VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
-
-#define MACvPwrEvntDisable(iobase) \
- VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL)
#define MACvEnableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvDisableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvEnableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvDisableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvSetBBType(iobase, byTyp) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvReadATIMW(iobase, pwCounter) \
- VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
-
-#define MACvWriteATIMW(iobase, wCounter) \
- VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
-
-#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortW(iobase + byRegOfs, wCRC); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
-#define MACvGPIOIn(iobase, pbyValue) \
- VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
-
#define MACvSetRFLE_LatchBase(iobase) \
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define MAKEWORD(lb, hb) \
+ ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
+
bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits);
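MACvRegBitsOn()/MACvRegBitsOff() above are read-modify-write helpers now built directly on ioread8()/iowrite8(). They could equally be written as static inlines; the following is a sketch of such a possible follow-up cleanup, not something this patch does:

#include <linux/io.h>

static inline void vt6655_reg_bits_on(void __iomem *iobase, u8 reg, u8 bits)
{
	iowrite8(ioread8(iobase + reg) | bits, iobase + reg);
}

static inline void vt6655_reg_bits_off(void __iomem *iobase, u8 reg, u8 bits)
{
	iowrite8(ioread8(iobase + reg) & ~bits, iobase + reg);
}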
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 4498c9d400f2..ee5e2e0d9a8c 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -175,7 +175,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
+ dwValue = ioread32(iobase + MAC_REG_IFREGCTL);
if (dwValue & IFREGCTL_DONE)
break;
}
@@ -207,7 +207,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
+ iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
@@ -238,7 +238,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
- VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);
return ret;
}
@@ -254,10 +254,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
ret &= IFRFbWriteEmbedded(priv, al2230_channel_table1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ iowrite8(byChannel & 0x7F, iobase + MAC_REG_CHANNEL);
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ iowrite8(byChannel | 0x80, iobase + MAC_REG_CHANNEL);
return ret;
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 53506e242a96..71cbfa607d96 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1426,7 +1426,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Set auto Transmit on */
MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
/* Poll Transmit the adapter */
- MACvTransmitBCN(priv->port_offset);
+ iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);
return 0;
}
@@ -1450,9 +1450,9 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
{
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 5cdbc24e8c45..722a2cc9a473 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -28,7 +28,6 @@
*/
#include "upc.h"
-#include "tmacro.h"
#include "mac.h"
#include "srom.h"
@@ -66,29 +65,29 @@ unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byOrg;
byData = 0xFF;
- VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
+ byOrg = ioread8(iobase + MAC_REG_I2MCFG);
/* turn off hardware retry for getting NACK */
- VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+ iowrite8(byOrg & (~I2MCFG_NORETRY), iobase + MAC_REG_I2MCFG);
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
+ iowrite8(EEP_I2C_DEV_ID, iobase + MAC_REG_I2MTGID);
+ iowrite8(byContntOffset, iobase + MAC_REG_I2MTGAD);
/* issue read command */
- VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+ iowrite8(I2MCSR_EEMR, iobase + MAC_REG_I2MCSR);
/* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
+ byWait = ioread8(iobase + MAC_REG_I2MCSR);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
break;
- PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
+ udelay(CB_DELAY_LOOP_WAIT);
}
if ((wDelay < W_MAX_TIMEOUT) &&
(!(byWait & I2MCSR_NACK))) {
break;
}
}
- VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
- VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
+ byData = ioread8(iobase + MAC_REG_I2MDIPT);
+ iowrite8(byOrg, iobase + MAC_REG_I2MCFG);
return byData;
}
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
deleted file mode 100644
index 1582c03124c9..000000000000
--- a/drivers/staging/vt6655/tmacro.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * Purpose: define basic common types and macros
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __TMACRO_H__
-#define __TMACRO_H__
-
-/****** Common helper macros ***********************************************/
-
-#if !defined(LOBYTE)
-#define LOBYTE(w) ((unsigned char)(w))
-#endif
-#if !defined(HIBYTE)
-#define HIBYTE(w) ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
-#endif
-
-#if !defined(LOWORD)
-#define LOWORD(d) ((unsigned short)(d))
-#endif
-#if !defined(HIWORD)
-#define HIWORD(d) ((unsigned short)((((unsigned long)(d)) >> 16) & 0xFFFF))
-#endif
-
-#define LODWORD(q) ((q).u.dwLowDword)
-#define HIDWORD(q) ((q).u.dwHighDword)
-
-#if !defined(MAKEWORD)
-#define MAKEWORD(lb, hb) ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
-#endif
-#if !defined(MAKEDWORD)
-#define MAKEDWORD(lw, hw) ((unsigned long)(((unsigned short)(lw)) | (((unsigned long)((unsigned short)(hw))) << 16)))
-#endif
-
-#endif /* __TMACRO_H__ */
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index b374db5fca81..2a47f5782b71 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -20,37 +20,12 @@
/* For memory mapped IO */
-#define VNSvInPortB(dwIOAddress, pbyData) \
- (*(pbyData) = ioread8(dwIOAddress))
-
-#define VNSvInPortW(dwIOAddress, pwData) \
- (*(pwData) = ioread16(dwIOAddress))
-
-#define VNSvInPortD(dwIOAddress, pdwData) \
- (*(pdwData) = ioread32(dwIOAddress))
-
-#define VNSvOutPortB(dwIOAddress, byData) \
- iowrite8((u8)(byData), dwIOAddress)
-
#define VNSvOutPortW(dwIOAddress, wData) \
iowrite16((u16)(wData), dwIOAddress)
#define VNSvOutPortD(dwIOAddress, dwData) \
iowrite32((u32)(dwData), dwIOAddress)
-#define PCAvDelayByIO(uDelayUnit) \
-do { \
- unsigned char __maybe_unused byData; \
- unsigned long ii; \
- \
- if (uDelayUnit <= 50) { \
- udelay(uDelayUnit); \
- } else { \
- for (ii = 0; ii < (uDelayUnit); ii++) \
- byData = inb(0x61); \
- } \
-} while (0)
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index aca003031995..413e2fc4a50d 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -55,7 +55,6 @@ static struct ieee80211_channel vnt_channels_2ghz[] = {
{ .center_freq = 2484, .hw_value = 14 }
};
-
static struct ieee80211_supported_band vnt_supported_2ghz_band = {
.channels = vnt_channels_2ghz,
.n_channels = ARRAY_SIZE(vnt_channels_2ghz),
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index acbbf8acdf1b..464602c74727 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -82,7 +82,6 @@ static u8 al2230_channel_table1[CB_MAX_CHANNEL_24G][3] = {
{0x06, 0x66, 0x61}
};
-
static u8 vt3226_init_table[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xff, 0x80},
{0x02, 0x82, 0xa1},
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 7951bd63816f..87379edce9a8 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -328,8 +328,7 @@ static int prism2_scan(struct wiphy *wiphy,
(i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
i++)
msg1.channellist.data.data[i] =
- ieee80211_frequency_to_channel(
- request->channels[i]->center_freq);
+ ieee80211_frequency_to_channel(request->channels[i]->center_freq);
msg1.channellist.data.len = request->n_channels;
msg1.maxchanneltime.data = 250;
@@ -476,14 +475,13 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
result = prism2_domibset_uint32(wlandev,
- DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
+ DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
sme->key_idx);
if (result)
goto exit;
/* send key to driver */
- did = didmib_dot11smt_wepdefaultkeystable_key(
- sme->key_idx + 1);
+ did = didmib_dot11smt_wepdefaultkeystable_key(sme->key_idx + 1);
result = prism2_domibset_pstr32(wlandev,
did, sme->key_len,
(u8 *)sme->key);
@@ -589,7 +587,7 @@ static int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
data = MBM_TO_DBM(mbm);
result = prism2_domibset_uint32(wlandev,
- DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
+ DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
data);
if (result) {
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 98c154a8d8c1..0611e37df6ac 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1227,8 +1227,8 @@ struct hfa384x {
struct timer_list throttle;
- struct tasklet_struct reaper_bh;
- struct tasklet_struct completion_bh;
+ struct work_struct reaper_bh;
+ struct work_struct completion_bh;
struct work_struct usb_work;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 938e11a1a0b6..33844526c797 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -191,9 +191,9 @@ static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
static void hfa384x_usb_throttlefn(struct timer_list *t);
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_completion_task(struct work_struct *work);
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work);
static int hfa384x_usbctlx_submit(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
@@ -539,8 +539,8 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
/* Initialize the authentication queue */
skb_queue_head_init(&hw->authq);
- tasklet_setup(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
- tasklet_setup(&hw->completion_bh, hfa384x_usbctlx_completion_task);
+ INIT_WORK(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
+ INIT_WORK(&hw->completion_bh, hfa384x_usbctlx_completion_task);
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
@@ -2585,20 +2585,20 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
/*----------------------------------------------------------------
* hfa384x_usbctlx_reaper_task
*
- * Tasklet to delete dead CTLX objects
+ * Deferred work callback to delete dead CTLX objects
*
* Arguments:
- * data ptr to a struct hfa384x
+ * work contains ptr to a struct hfa384x
*
* Returns:
*
* Call context:
- * Interrupt
+ * Task
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
{
- struct hfa384x *hw = from_tasklet(hw, t, reaper_bh);
+ struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
@@ -2618,21 +2618,21 @@ static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
/*----------------------------------------------------------------
* hfa384x_usbctlx_completion_task
*
- * Tasklet to call completion handlers for returned CTLXs
+ * Deferred work callback to call completion handlers for returned CTLXs
*
* Arguments:
- * data ptr to struct hfa384x
+ * work contains ptr to a struct hfa384x
*
* Returns:
* Nothing
*
* Call context:
- * Interrupt
+ * Task
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_completion_task(struct work_struct *work)
{
- struct hfa384x *hw = from_tasklet(hw, t, completion_bh);
+ struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
@@ -2686,7 +2686,7 @@ static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
if (reap)
- tasklet_schedule(&hw->reaper_bh);
+ schedule_work(&hw->reaper_bh);
}
/*----------------------------------------------------------------
@@ -2743,7 +2743,7 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
* aren't active and the timers should have been stopped.
*
* The CTLX is migrated to the "completing" queue, and the completing
- * tasklet is scheduled.
+ * work is scheduled.
*
* Arguments:
* hw ptr to a struct hfa384x structure
@@ -2766,7 +2766,7 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw,
* queue.
*/
list_move_tail(&ctlx->list, &hw->ctlxq.completing);
- tasklet_schedule(&hw->completion_bh);
+ schedule_work(&hw->completion_bh);
switch (ctlx->state) {
case CTLX_COMPLETE:
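The wlan-ng conversion follows the usual tasklet-to-workqueue recipe: the struct tasklet_struct members become struct work_struct, the callbacks recover their container with container_of(), and tasklet_setup()/tasklet_schedule()/tasklet_kill() become INIT_WORK()/schedule_work()/cancel_work_sync(). Note that the member named in container_of() must be the one the callback was registered on. A condensed sketch with simplified names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_hw {
	struct work_struct reaper_bh;	/* was: struct tasklet_struct */
};

static void reaper_fn(struct work_struct *work)
{
	struct example_hw *hw = container_of(work, struct example_hw, reaper_bh);

	/* ... reap completed control exchanges for hw ... */
}

/* setup:    INIT_WORK(&hw->reaper_bh, reaper_fn);    (was tasklet_setup)    */
/* trigger:  schedule_work(&hw->reaper_bh);           (was tasklet_schedule) */
/* teardown: cancel_work_sync(&hw->reaper_bh);        (was tasklet_kill)     */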
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index dc0749b8eff7..e13da7fadfff 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -165,8 +165,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
/* There's no hardware to shutdown, but the driver
- * might have some tasks or tasklets that must be
- * stopped before we can tear everything down.
+ * might have some tasks that must be stopped before
+ * we can tear everything down.
*/
prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
@@ -181,8 +181,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
usb_kill_urb(&hw->tx_urb);
usb_kill_urb(&hw->ctlx_urb);
- tasklet_kill(&hw->completion_bh);
- tasklet_kill(&hw->reaper_bh);
+ cancel_work_sync(&hw->completion_bh);
+ cancel_work_sync(&hw->reaper_bh);
cancel_work_sync(&hw->link_bh);
cancel_work_sync(&hw->commsqual_bh);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316..e6a967ddc08c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
cmd->priv = scmd->cmnd;
- blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
- pscsi_req_done);
+ blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
return 0;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 6554e06e053e..28f87cd8b3ed 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -512,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
* Allow kernel address to register with OP-TEE as kernel
* pages are configured as normal memory only.
*/
- if (virt_addr_valid(start) || is_vmalloc_addr((void *)start))
+ if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
return 0;
mmap_read_lock(mm);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 4986edfbdf67..e92c658dba1c 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -158,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
- struct tb_cfg_request *req;
- bool found = false;
+ struct tb_cfg_request *req = NULL, *iter;
mutex_lock(&pkg->ctl->request_queue_lock);
- list_for_each_entry(req, &pkg->ctl->request_queue, list) {
- tb_cfg_request_get(req);
- if (req->match(req, pkg)) {
- found = true;
+ list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
+ tb_cfg_request_get(iter);
+ if (iter->match(iter, pkg)) {
+ req = iter;
break;
}
- tb_cfg_request_put(req);
+ tb_cfg_request_put(iter);
}
mutex_unlock(&pkg->ctl->request_queue_lock);
- return found ? req : NULL;
+ return req;
}
/* utility functions */
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4bc87b0f003a..1333b158a95e 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1250,7 +1250,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
- /* cannot fail - table is allocated bin pcim_iomap_regions */
+ /* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 299712accfe9..ee03fd75a472 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
return NULL;
}
+ tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
+ path->name, tb_route(src->sw), src->port);
+
p = src;
h = src_hopid;
@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
path->hops[i].out_port = out_port;
path->hops[i].next_hop_index = next_hop;
+ tb_dump_hop(&path->hops[i], &hop);
+
h = next_hop;
p = out_port->remote;
}
+ tb_dbg(path->tb, "path discovery complete\n");
return path;
err:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ac87e8b50e52..561e1d77240e 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
else
phy |= LANE_ADP_CS_1_LD;
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
+
+ ret = tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
+ return 0;
}
/**
@@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
return !!(widths & width);
}
-static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+/**
+ * tb_port_set_link_width() - Set target link width of the lane adapter
+ * @port: Lane adapter
+ * @width: Target link width (%1 or %2)
+ *
+ * Sets the target link width of the lane adapter to @width. Does not
+ * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
u32 val;
int ret;
@@ -1020,13 +1036,59 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
return -EINVAL;
}
- val |= LANE_ADP_CS_1_LB;
-
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
+ * tb_port_set_lane_bonding() - Enable/disable lane bonding
+ * @port: Lane adapter
+ * @bonding: enable/disable bonding
+ *
+ * Enables or disables lane bonding. This should be called after target
+ * link width has been set (tb_port_set_link_width()). Note in most
+ * cases one should use tb_port_lane_bonding_enable() instead to enable
+ * lane bonding.
+ *
+ * As a side effect sets @port->bonding accordingly (and does the same
+ * for lane 1 too).
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (bonding)
+ val |= LANE_ADP_CS_1_LB;
+ else
+ val &= ~LANE_ADP_CS_1_LB;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * When lane 0 bonding is set it will affect lane 1 too so
+ * update both.
+ */
+ port->bonded = bonding;
+ port->dual_link_port->bonded = bonding;
+
+ return 0;
+}
+
+/**
* tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable
*
@@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
if (ret == 1) {
ret = tb_port_set_link_width(port, 2);
if (ret)
- return ret;
+ goto err_lane0;
}
ret = tb_port_get_link_width(port->dual_link_port);
if (ret == 1) {
ret = tb_port_set_link_width(port->dual_link_port, 2);
- if (ret) {
- tb_port_set_link_width(port, 1);
- return ret;
- }
+ if (ret)
+ goto err_lane0;
}
- port->bonded = true;
- port->dual_link_port->bonded = true;
+ ret = tb_port_set_lane_bonding(port, true);
+ if (ret)
+ goto err_lane1;
return 0;
+
+err_lane1:
+ tb_port_set_link_width(port->dual_link_port, 1);
+err_lane0:
+ tb_port_set_link_width(port, 1);
+ return ret;
}
/**
@@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
*
* Disable bonding by setting the link width of the port and the
* other port in case of dual link port.
- *
*/
void tb_port_lane_bonding_disable(struct tb_port *port)
{
- port->dual_link_port->bonded = false;
- port->bonded = false;
-
+ tb_port_set_lane_bonding(port, false);
tb_port_set_link_width(port->dual_link_port, 1);
tb_port_set_link_width(port, 1);
}
@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
do {
ret = tb_port_get_link_width(port);
- if (ret < 0)
- return ret;
- else if (ret == width)
+ if (ret < 0) {
+ /*
+ * Sometimes we get a port locked error when
+ * polling the lanes so we can ignore it and
+ * retry.
+ */
+ if (ret != -EACCES)
+ return ret;
+ } else if (ret == width) {
return 0;
+ }
usleep_range(1000, 2000);
} while (ktime_before(ktime_get(), timeout));
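Taken together, the kernel-doc for tb_port_set_link_width() and tb_port_set_lane_bonding() prescribes an ordering: program the target width on both lane adapters first, then flip the bonding bit, unwinding the widths on failure. A condensed sketch of that sequence (assuming port is the lane 0 adapter and has a valid dual_link_port), mirroring the reworked tb_port_lane_bonding_enable() above:

static int example_bond(struct tb_port *port)
{
	int ret;

	ret = tb_port_set_link_width(port, 2);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(port->dual_link_port, 2);
	if (ret)
		goto err_lane0;

	/* Only now enable bonding; this also updates port->bonded. */
	ret = tb_port_set_lane_bonding(port, true);
	if (ret)
		goto err_lane1;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, 1);
err_lane0:
	tb_port_set_link_width(port, 1);
	return ret;
}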
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9beb47b31c75..9a3214fb5038 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb)
static int tb_port_configure_xdomain(struct tb_port *port)
{
- /*
- * XDomain paths currently only support single lane so we must
- * disable the other lane according to USB4 spec.
- */
- tb_port_disable(port->dual_link_port);
-
if (tb_switch_is_usb4(port->sw))
return usb4_port_configure_xdomain(port);
return tb_lc_configure_xdomain(port);
@@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
static void tb_tunnel_dp(struct tb *tb)
{
- int available_up, available_down, ret;
+ int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel;
@@ -913,6 +907,20 @@ static void tb_tunnel_dp(struct tb *tb)
}
/*
+ * This is only applicable to links that are not bonded (so
+ * when Thunderbolt 1 hardware is involved somewhere in the
+ * topology). For these, try to share the DP bandwidth between
+ * the two lanes.
+ */
+ link_nr = 1;
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel)) {
+ link_nr = 0;
+ break;
+ }
+ }
+
+ /*
* DP stream needs the domain to be active so runtime resume
* both ends of the tunnel.
*
@@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb)
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
- tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
+ available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index b6fcd8d45324..4602c69913fa 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -674,7 +674,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
const struct tb_port *__port = (_port); \
- level(__port->sw->tb, "%llx:%x: " fmt, \
+ level(__port->sw->tb, "%llx:%u: " fmt, \
tb_route(__port->sw), __port->port, ## arg); \
} while (0)
#define tb_port_WARN(port, fmt, arg...) \
@@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw);
int tb_switch_xhci_connect(struct tb_switch *sw);
void tb_switch_xhci_disconnect(struct tb_switch *sw);
+int tb_port_state(struct tb_port *port);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
-int tb_port_state(struct tb_port *port);
+int tb_port_set_link_width(struct tb_port *port, unsigned int width);
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index fe1afa44c56d..33c4c7aed56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -527,6 +527,10 @@ enum tb_xdp_type {
PROPERTIES_CHANGED_RESPONSE,
ERROR_RESPONSE,
UUID_REQUEST = 12,
+ LINK_STATE_STATUS_REQUEST = 15,
+ LINK_STATE_STATUS_RESPONSE,
+ LINK_STATE_CHANGE_REQUEST,
+ LINK_STATE_CHANGE_RESPONSE,
};
struct tb_xdp_header {
@@ -540,6 +544,41 @@ struct tb_xdp_error_response {
u32 error;
};
+struct tb_xdp_link_state_status {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_link_state_status_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ u8 slw;
+ u8 tlw;
+ u8 sls;
+ u8 tls;
+ };
+ };
+};
+
+struct tb_xdp_link_state_change {
+ struct tb_xdp_header hdr;
+ u8 tlw;
+ u8 tls;
+ u16 reserved;
+};
+
+struct tb_xdp_link_state_change_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ };
+ };
+};
+
struct tb_xdp_uuid {
struct tb_xdp_header hdr;
};
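The response structures above overlay a tb_xdp_error_response on the same buffer, so a receiver is expected to check for an error packet before trusting the status and link fields. A hypothetical consumer sketch, assuming tb_xdp_error_response starts with the common tb_xdp_header as the union layout implies:

static int example_parse_link_state_status(const struct tb_xdp_link_state_status_response *res)
{
	/* An ERROR_RESPONSE reuses the same buffer via the union. */
	if (res->err.hdr.type == ERROR_RESPONSE)
		return -EIO;

	if (res->status)
		return -EREMOTEIO;

	/* res->slw, res->tlw, res->sls and res->tls are now valid. */
	return 0;
}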
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index b301eeb0c89b..6a16f61a72a1 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -311,11 +311,16 @@ struct tb_regs_port_header {
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT 16
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
+#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
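A small sketch of how the new LANE_ADP_CS_0 speed/width fields are meant to be decoded (val is the register value read from the lane adapter config space), matching the check done later in tb_xdomain_get_link_status():

static bool lane_adp_supports_dual_width(u32 val)
{
	u8 slw = (val & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return slw >= LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL;
}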
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index be9b1d7e63d2..ee37f8b58f50 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
return sw;
}
+static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
+ struct tb_switch *parent,
+ u64 route, bool bonded)
+{
+ struct tb_switch *sw;
+ int i;
+
+ sw = alloc_dev_default(test, parent, route, bonded);
+ if (!sw)
+ return NULL;
+ /*
+ * Device with:
+ * 2x USB4 Adapters (adapters 1,2 and 3,4),
+ * 1x PCIe Upstream (adapter 9),
+ * 1x PCIe Downstream (adapter 10),
+ * 1x USB3 Upstream (adapter 16),
+ * 1x USB3 Downstream (adapter 17)
+ */
+ for (i = 5; i <= 8; i++)
+ sw->ports[i].disabled = true;
+
+ for (i = 11; i <= 14; i++)
+ sw->ports[i].disabled = true;
+
+ sw->ports[13].cap_adap = 0;
+ sw->ports[14].cap_adap = 0;
+
+ for (i = 18; i <= 19; i++)
+ sw->ports[i].disabled = true;
+
+ sw->generation = 4;
+ sw->credit_allocation = true;
+ sw->max_usb3_credits = 109;
+ sw->min_dp_aux_credits = 0;
+ sw->min_dp_main_credits = 0;
+ sw->max_pcie_credits = 30;
+ sw->max_dma_credits = 1;
+
+ return sw;
+}
+
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
@@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
tb_tunnel_free(tunnel);
}
+static void tb_test_credit_alloc_without_dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_without_dp(test, host, 0x1, true);
+
+ /*
+ * The device has no DP therefore baMinDPmain = baMinDPaux = 0
+ *
+ * Create PCIe path with buffers less than baMaxPCIe.
+ *
+ * For a device with buffers configurations:
+ * baMaxUSB3 = 109
+ * baMinDPaux = 0
+ * baMinDPmain = 0
+ * baMaxPCIe = 30
+ * baMaxHI = 1
+ * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
+ * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
+ * = Max(6, Min(30, 9)) = 9
+ */
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ /* PCIe downstream path */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
+
+ /* PCIe upstream path */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+ tb_tunnel_free(tunnel);
+}
+
static void tb_test_credit_alloc_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
@@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
KUNIT_CASE(tb_test_credit_alloc_pcie),
+ KUNIT_CASE(tb_test_credit_alloc_without_dp),
KUNIT_CASE(tb_test_credit_alloc_dp),
KUNIT_CASE(tb_test_credit_alloc_usb3),
KUNIT_CASE(tb_test_credit_alloc_dma),
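The comment in tb_test_credit_alloc_without_dp() above quotes the buffer allocation formula. A worked form of that arithmetic, using the hypothetical test device's numbers (total = 120, CP = 2, DP = 0, baMaxUSB3 = 109, baMaxPCIe = 30), not a real hardware configuration:

#include <linux/minmax.h>

static unsigned int example_pcie_credits(void)
{
	unsigned int remaining = 120 - (2 + 0);				/* 118 */
	unsigned int pcie = min_t(unsigned int, 30, remaining - 109);	/* min(30, 9) = 9 */

	return max_t(unsigned int, 6, pcie);				/* 9 */
}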
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 118742ec93ed..2c3cf7fc3357 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port,
* Maximum number of DP streams possible through the
* lane adapter.
*/
- ndp = (credits - (usb3 + pcie + spare)) /
- (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
+ ndp = (credits - (usb3 + pcie + spare)) /
+ (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ else
+ ndp = 0;
} else {
ndp = 0;
}
@@ -858,6 +861,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @link_nr: Preferred lane adapter when the link is not bonded
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
* if not limited)
* @max_down: Maximum available downstream bandwidth for the DP tunnel
@@ -869,8 +873,8 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down)
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
paths = tunnel->paths;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
- 1, "Video");
+ link_nr, "Video");
if (!path)
goto err_free;
tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
- TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+ TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
- TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+ TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 03e56076b5bc..bb4d1f1d6d0b 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -71,8 +71,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down);
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 29e2a4f9c9f5..6b02945624ee 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -7,9 +7,37 @@
*/
#include <linux/pm_runtime.h>
+#include <linux/component.h>
+#include <linux/property.h>
#include "tb.h"
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
static ssize_t link_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
return ERR_PTR(ret);
}
+ if (dev_fwnode(&usb4->dev)) {
+ ret = component_add(&usb4->dev, &connector_ops);
+ if (ret) {
+ dev_err(&usb4->dev, "failed to add component\n");
+ device_unregister(&usb4->dev);
+ }
+ }
+
pm_runtime_no_callbacks(&usb4->dev);
pm_runtime_set_active(&usb4->dev);
pm_runtime_enable(&usb4->dev);
@@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
*/
void usb4_port_device_remove(struct usb4_port *usb4)
{
+ if (dev_fwnode(&usb4->dev))
+ component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev);
}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 01d6b724ca51..c31c0d94d8b3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -19,13 +19,38 @@
#include "tb.h"
-#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
-#define XDOMAIN_UUID_RETRIES 10
-#define XDOMAIN_PROPERTIES_RETRIES 10
-#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
-#define XDOMAIN_BONDING_WAIT 100 /* ms */
+#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */
+#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
+#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */
+#define XDOMAIN_RETRIES 10
#define XDOMAIN_DEFAULT_MAX_HOPID 15
+enum {
+ XDOMAIN_STATE_INIT,
+ XDOMAIN_STATE_UUID,
+ XDOMAIN_STATE_LINK_STATUS,
+ XDOMAIN_STATE_LINK_STATE_CHANGE,
+ XDOMAIN_STATE_LINK_STATUS2,
+ XDOMAIN_STATE_BONDING_UUID_LOW,
+ XDOMAIN_STATE_BONDING_UUID_HIGH,
+ XDOMAIN_STATE_PROPERTIES,
+ XDOMAIN_STATE_ENUMERATED,
+ XDOMAIN_STATE_ERROR,
+};
+
+static const char * const state_names[] = {
+ [XDOMAIN_STATE_INIT] = "INIT",
+ [XDOMAIN_STATE_UUID] = "UUID",
+ [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
+ [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
+ [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
+ [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
+ [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
+ [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
+ [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
+ [XDOMAIN_STATE_ERROR] = "ERROR",
+};
+
struct xdomain_request_work {
struct work_struct work;
struct tb_xdp_header *pkg;
@@ -235,7 +260,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
}
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
- uuid_t *uuid)
+ uuid_t *uuid, u64 *remote_route)
{
struct tb_xdp_uuid_response res;
struct tb_xdp_uuid req;
@@ -258,6 +283,8 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
return ret;
uuid_copy(uuid, &res.src_uuid);
+ *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
+
return 0;
}
@@ -473,6 +500,112 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
TB_CFG_PKG_XDOMAIN_RESP);
}
+static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 *slw, u8 *tlw,
+ u8 *sls, u8 *tls)
+{
+ struct tb_xdp_link_state_status_response res;
+ struct tb_xdp_link_state_status req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ if (res.status != 0)
+ return -EREMOTEIO;
+
+ *slw = res.slw;
+ *tlw = res.tlw;
+ *sls = res.sls;
+ *tls = res.tls;
+
+ return 0;
+}
+
+static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
+ struct tb_xdomain *xd, u8 sequence)
+{
+ struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+ struct tb_xdp_link_state_status_response res;
+ struct tb_port *port = tb_port_at(xd->route, sw);
+ u32 val[2];
+ int ret;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, xd->route, sequence,
+ LINK_STATE_STATUS_RESPONSE, sizeof(res));
+
+ ret = tb_port_read(port, val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+ res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
+ res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+ res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 tlw, u8 tls)
+{
+ struct tb_xdp_link_state_change_response res;
+ struct tb_xdp_link_state_change req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
+ sizeof(req));
+ req.tlw = tlw;
+ req.tls = tls;
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ return res.status != 0 ? -EREMOTEIO : 0;
+}
+
+static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u32 status)
+{
+ struct tb_xdp_link_state_change_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
+ sizeof(res));
+
+ res.status = status;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
/**
* tb_register_protocol_handler() - Register protocol handler
* @handler: Handler to register
@@ -600,14 +733,13 @@ static void tb_xdp_handle_request(struct work_struct *work)
goto out;
}
- tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);
-
xd = tb_xdomain_find_by_route_locked(tb, route);
if (xd)
update_property_block(xd);
switch (pkg->type) {
case PROPERTIES_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain properties request\n", route);
if (xd) {
ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
(const struct tb_xdp_properties *)pkg);
@@ -615,6 +747,9 @@ static void tb_xdp_handle_request(struct work_struct *work)
break;
case PROPERTIES_CHANGED_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain properties changed request\n",
+ route);
+
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
/*
@@ -622,18 +757,51 @@ static void tb_xdp_handle_request(struct work_struct *work)
* the xdomain related to this connection as well in
* case there is a change in services it offers.
*/
- if (xd && device_is_registered(&xd->dev)) {
- queue_delayed_work(tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(50));
- }
+ if (xd && device_is_registered(&xd->dev))
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
break;
case UUID_REQUEST_OLD:
case UUID_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
break;
+ case LINK_STATE_STATUS_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state status request\n",
+ route);
+
+ if (xd) {
+ ret = tb_xdp_link_state_status_response(tb, ctl, xd,
+ sequence);
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
+ }
+ break;
+
+ case LINK_STATE_CHANGE_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state change request\n",
+ route);
+
+ if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
+ const struct tb_xdp_link_state_change *lsc =
+ (const struct tb_xdp_link_state_change *)pkg;
+
+ ret = tb_xdp_link_state_change_response(ctl, route,
+ sequence, 0);
+ xd->target_link_width = lsc->tlw;
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
+ }
+ break;
+
default:
+ tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
tb_xdp_error_response(ctl, route, sequence,
ERROR_NOT_SUPPORTED);
break;
@@ -1000,32 +1168,38 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
return 0;
}
-static void tb_xdomain_get_uuid(struct work_struct *work)
+static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
- struct tb_xdomain *xd = container_of(work, typeof(*xd),
- get_uuid_work.work);
struct tb *tb = xd->tb;
uuid_t uuid;
+ u64 route;
int ret;
dev_dbg(&xd->dev, "requesting remote UUID\n");
- ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
+ &route);
if (ret < 0) {
- if (xd->uuid_retries-- > 0) {
+ if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
- queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
- msecs_to_jiffies(100));
+ return -EAGAIN;
} else {
dev_dbg(&xd->dev, "failed to read remote UUID\n");
}
- return;
+ return ret;
}
dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
- if (uuid_equal(&uuid, xd->local_uuid))
- dev_dbg(&xd->dev, "intra-domain loop detected\n");
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ if (route == xd->route)
+ dev_dbg(&xd->dev, "loop back detected\n");
+ else
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+
+ /* Don't bond lanes automatically for loops */
+ xd->bonding_possible = false;
+ }
/*
* If the UUID is different, there is another domain connected
@@ -1035,27 +1209,152 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
xd->is_unplugged = true;
- return;
+ return -ENODEV;
}
/* First time fill in the missing UUID */
if (!xd->remote_uuid) {
xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
if (!xd->remote_uuid)
- return;
+ return -ENOMEM;
}
- /* Now we can start the normal properties exchange */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ return 0;
}
-static void tb_xdomain_get_properties(struct work_struct *work)
+static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
+{
+ struct tb *tb = xd->tb;
+ u8 slw, tlw, sls, tls;
+ int ret;
+
+ dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
+ xd->remote_uuid);
+
+ ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
+ xd->state_retries, &slw, &tlw, &sls,
+ &tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to request remote link status, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "failed to receive remote link status\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
+
+ if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
+ dev_dbg(&xd->dev, "remote adapter is single lane only\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
+ unsigned int width)
+{
+ struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+ struct tb_port *port = tb_port_at(xd->route, sw);
+ struct tb *tb = xd->tb;
+ u8 tlw, tls;
+ u32 val;
+ int ret;
+
+ if (width == 2)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
+ else if (width == 1)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
+ else
+ return -EINVAL;
+
+ /* Use the current target speed */
+ ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+ tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+
+ dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
+ tlw, tls);
+
+ ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
+ xd->state_retries, tlw, tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to change remote link state, retrying\n");
+ return -EAGAIN;
+ }
+ dev_err(&xd->dev, "failed request link state change, aborting\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "received link state change response\n");
+ return 0;
+}
+
+static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
+{
+ struct tb_port *port;
+ int ret, width;
+
+ if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
+ width = 1;
+ } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
+ width = 2;
+ } else {
+ if (xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "link state change request not received yet, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "timeout waiting for link change request\n");
+ return -ETIMEDOUT;
+ }
+
+ port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+
+ /*
+ * We can't use tb_xdomain_lane_bonding_enable() here because it
+ * is the other side that initiates lane bonding. So here we
+ * just set the width on both lane adapters and wait for the
+ * link to transition to bonded.
+ */
+ ret = tb_port_set_link_width(port->dual_link_port, width);
+ if (ret) {
+ tb_port_warn(port->dual_link_port,
+ "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_set_link_width(port, width);
+ if (ret) {
+ tb_port_warn(port, "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
+ if (ret) {
+ dev_warn(&xd->dev, "error waiting for link width to become %d\n",
+ width);
+ return ret;
+ }
+
+ port->bonded = width == 2;
+ port->dual_link_port->bonded = width == 2;
+
+ tb_port_update_credits(port);
+ tb_xdomain_update_link_attributes(xd);
+
+ dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
+ return 0;
+}
+
+static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
- struct tb_xdomain *xd = container_of(work, typeof(*xd),
- get_properties_work.work);
struct tb_property_dir *dir;
struct tb *tb = xd->tb;
bool update = false;
@@ -1066,34 +1365,35 @@ static void tb_xdomain_get_properties(struct work_struct *work)
dev_dbg(&xd->dev, "requesting remote properties\n");
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
- xd->remote_uuid, xd->properties_retries,
+ xd->remote_uuid, xd->state_retries,
&block, &gen);
if (ret < 0) {
- if (xd->properties_retries-- > 0) {
+ if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to request remote properties, retrying\n");
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ return -EAGAIN;
} else {
/* Give up now */
dev_err(&xd->dev,
"failed read XDomain properties from %pUb\n",
xd->remote_uuid);
}
- return;
- }
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ return ret;
+ }
mutex_lock(&xd->lock);
/* Only accept newer generation properties */
- if (xd->remote_properties && gen <= xd->remote_property_block_gen)
+ if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
+ ret = 0;
goto err_free_block;
+ }
dir = tb_property_parse_dir(block, ret);
if (!dir) {
dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ ret = -ENOMEM;
goto err_free_block;
}
@@ -1124,9 +1424,16 @@ static void tb_xdomain_get_properties(struct work_struct *work)
* registered, we notify the userspace that it has changed.
*/
if (!update) {
+ struct tb_port *port;
+
+ /* Now disable lane 1 if bonding was not enabled */
+ port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ if (!port->bonded)
+ tb_port_disable(port->dual_link_port);
+
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
- return;
+ return -ENODEV;
}
dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
xd->vendor, xd->device);
@@ -1138,13 +1445,193 @@ static void tb_xdomain_get_properties(struct work_struct *work)
}
enumerate_services(xd);
- return;
+ return 0;
err_free_dir:
tb_property_free_dir(dir);
err_free_block:
kfree(block);
mutex_unlock(&xd->lock);
+
+ return ret;
+}
+
+static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_UUID;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS2;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
+{
+ if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
+ dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
+ xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
+ } else {
+ dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
+ xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
+ }
+
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_PROPERTIES;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+{
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_state_work(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+ int ret, state = xd->state;
+
+ if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
+ state > XDOMAIN_STATE_ERROR))
+ return;
+
+ dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
+
+ switch (state) {
+ case XDOMAIN_STATE_INIT:
+ if (xd->needs_uuid) {
+ tb_xdomain_queue_uuid(xd);
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_UUID:
+ ret = tb_xdomain_get_uuid(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ xd->state = XDOMAIN_STATE_ERROR;
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ if (xd->bonding_possible)
+ tb_xdomain_queue_link_status(xd);
+ else
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+
+ /*
+ * If any of the lane bonding states fail we skip
+ * bonding completely and try to continue from
+ * reading properties.
+ */
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATE_CHANGE:
+ ret = tb_xdomain_link_state_change(xd, 2);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_link_status2(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS2:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding_uuid_low(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_LOW:
+ tb_xdomain_lane_bonding_enable(xd);
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_HIGH:
+ if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_PROPERTIES:
+ ret = tb_xdomain_get_properties(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ xd->state = XDOMAIN_STATE_ERROR;
+ } else {
+ xd->state = XDOMAIN_STATE_ENUMERATED;
+ }
+ break;
+
+ case XDOMAIN_STATE_ENUMERATED:
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_ERROR:
+ break;
+
+ default:
+ dev_warn(&xd->dev, "unexpected state %d\n", state);
+ break;
+ }
+
+ return;
+
+retry_state:
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_properties_changed(struct work_struct *work)
@@ -1163,13 +1650,13 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
"failed to send properties changed notification, retrying\n");
queue_delayed_work(xd->tb->wq,
&xd->properties_changed_work,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
dev_err(&xd->dev, "failed to send properties changed notification\n");
return;
}
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -1304,31 +1791,17 @@ static void tb_xdomain_release(struct device *dev)
static void start_handshake(struct tb_xdomain *xd)
{
- xd->uuid_retries = XDOMAIN_UUID_RETRIES;
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
-
- if (xd->needs_uuid) {
- queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
- msecs_to_jiffies(100));
- } else {
- /* Start exchanging properties with the other host */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
- }
+ xd->state = XDOMAIN_STATE_INIT;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void stop_handshake(struct tb_xdomain *xd)
{
- xd->uuid_retries = 0;
- xd->properties_retries = 0;
- xd->properties_changed_retries = 0;
-
- cancel_delayed_work_sync(&xd->get_uuid_work);
- cancel_delayed_work_sync(&xd->get_properties_work);
cancel_delayed_work_sync(&xd->properties_changed_work);
+ cancel_delayed_work_sync(&xd->state_work);
+ xd->properties_changed_retries = 0;
+ xd->state_retries = 0;
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
@@ -1389,8 +1862,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
ida_init(&xd->in_hopids);
ida_init(&xd->out_hopids);
mutex_init(&xd->lock);
- INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
- INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
@@ -1405,6 +1877,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
goto err_free_local_uuid;
} else {
xd->needs_uuid = true;
+ xd->bonding_possible = !!down->dual_link_port;
}
device_initialize(&xd->dev);
@@ -1523,9 +1996,9 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
- ret = tb_port_wait_for_link_width(port, 2, 100);
+ ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
if (ret) {
- tb_port_warn(port, "timeout enabling lane bonding\n");
+ tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
}
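The new tb_xdomain_state_work() above drives the whole handshake as a single delayed work item: each state handler returns -EAGAIN while it still has retries left, and the work simply re-queues itself; any other error parks the machine in the error state. A minimal sketch of that retry pattern with hypothetical example_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_xd {
	struct delayed_work state_work;
	struct workqueue_struct *wq;
	int state_retries;
};

static int example_step(struct example_xd *xd)
{
	/* A real step would send an XDomain request here. */
	return xd->state_retries-- > 0 ? -EAGAIN : 0;
}

static void example_state_work(struct work_struct *work)
{
	struct example_xd *xd = container_of(work, typeof(*xd), state_work.work);

	if (example_step(xd) == -EAGAIN)
		queue_delayed_work(xd->wq, &xd->state_work,
				   msecs_to_jiffies(1000));
}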
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 533d02b38e02..afb2d373dd47 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -588,10 +588,8 @@ static void change_speed(struct tty_struct *tty, struct serial_state *info,
}
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 9e8ccb8ed6d6..c7968aecd870 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -405,6 +405,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
@@ -426,6 +427,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, pdev);
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 8d60e0ff67b4..4f9264d005c0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -87,6 +87,25 @@ config HVC_DCC
driver. This console is used through a JTAG only on ARM. If you don't have
a JTAG then you probably don't want this option.
+config HVC_DCC_SERIALIZE_SMP
+ bool "Use DCC only on CPU core 0"
+ depends on SMP && HVC_DCC
+ help
+ This is a DEBUG option to serialize all console input and output to CPU 0.
+ Some external debuggers do not handle reads/writes from/to DCC on more
+ than one CPU core. Each core has its own DCC device registers, so when a
+ CPU core reads or writes from/to DCC, it only accesses its own DCC device.
+ Since kernel code can run on any CPU core, every time the kernel wants to
+ write to the console, it might write to a different DCC.
+
+ In SMP mode, external debuggers create multiple windows, and each window
+ shows the DCC output only from that core's DCC. The result is that
+ console output is either lost or scattered across windows.
+
+ Enable this option only if you are sure that you do not need features like
+ CPU hotplug to work. For example, during early chipset bringups without
+ debug serial console support. If unsure, say N.
+
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
depends on RISCV_SBI_V01
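The help text above describes the problem; the implementation that follows solves it by letting any CPU push characters into a kfifo and having a work item pinned to CPU 0 drain it to the DCC. A condensed sketch of that shape (example_* names are hypothetical; the real functions are hvc_dcc0_put_chars() and dcc_put_work() below):

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_KFIFO(example_fifo, unsigned char, 128);

static void example_drain(struct work_struct *work)
{
	unsigned char ch;
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	while (kfifo_get(&example_fifo, &ch))
		;	/* write 'ch' to the core 0 DCC here */
	spin_unlock_irqrestore(&example_lock, flags);
}
static DECLARE_WORK(example_work, example_drain);

/* Called from any CPU; the actual DCC write happens on CPU 0. */
static void example_emit(unsigned char ch)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	kfifo_put(&example_fifo, ch);
	spin_unlock_irqrestore(&example_lock, flags);

	if (cpu_online(0))
		schedule_work_on(0, &example_work);
}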
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index bd61f9372d83..1751108cf763 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2010, 2014, 2022 The Linux Foundation. All rights reserved. */
#include <linux/console.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/kfifo.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <asm/dcc.h>
#include <asm/processor.h>
@@ -15,6 +20,15 @@
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+#define DCC_INBUF_SIZE 128
+#define DCC_OUTBUF_SIZE 1024
+
+/* Lock to serialize access to DCC fifo */
+static DEFINE_SPINLOCK(dcc_lock);
+
+static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
+static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
+
static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
while (__dcc_getstatus() & DCC_STATUS_TX)
@@ -67,24 +81,176 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
return i;
}
+/*
+ * Check if the DCC is enabled. If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core0. That way,
+ * dcc_core0_available will be true only if it's available on core0.
+ */
static bool hvc_dcc_check(void)
{
unsigned long time = jiffies + (HZ / 10);
+ static bool dcc_core0_available;
+
+ /*
+ * If we're not on core 0, but we previously confirmed that DCC is
+ * active, then just return true.
+ */
+ int cpu = get_cpu();
+
+ if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP) && cpu && dcc_core0_available) {
+ put_cpu();
+ return true;
+ }
+
+ put_cpu();
/* Write a test character to check if it is handled */
__dcc_putchar('\n');
while (time_is_after_jiffies(time)) {
- if (!(__dcc_getstatus() & DCC_STATUS_TX))
+ if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+ dcc_core0_available = true;
return true;
+ }
}
return false;
}
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work(struct work_struct *work)
+{
+ unsigned char ch;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ /* While there's data in the output FIFO, write it to the DCC */
+ while (kfifo_get(&outbuf, &ch))
+ hvc_dcc_put_chars(0, &ch, 1);
+
+ /* While we're at it, check for any input characters */
+ while (!kfifo_is_full(&inbuf)) {
+ if (!hvc_dcc_get_chars(0, &ch, 1))
+ break;
+ kfifo_put(&inbuf, ch);
+ }
+
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_pwork, dcc_put_work);
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work(struct work_struct *work)
+{
+ unsigned char ch;
+ unsigned long irqflags;
+
+ /*
+ * Read characters from DCC and put them into the input FIFO, as
+ * long as there is room and we have characters to read.
+ */
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ while (!kfifo_is_full(&inbuf)) {
+ if (!hvc_dcc_get_chars(0, &ch, 1))
+ break;
+ kfifo_put(&inbuf, ch);
+ }
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_gwork, dcc_get_work);
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
+{
+ int len;
+ unsigned long irqflags;
+
+ if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+ return hvc_dcc_put_chars(vt, buf, count);
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+ if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+ len = kfifo_in(&outbuf, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ /*
+ * We just push data to the output FIFO, so schedule the
+ * workqueue that will actually write that data to DCC.
+ * CPU hotplug is disabled in dcc_init so CPU0 cannot be
+ * offlined after the cpu online check.
+ */
+ if (cpu_online(0))
+ schedule_work_on(0, &dcc_pwork);
+
+ return len;
+ }
+
+ /*
+ * If we're already on core 0, and the FIFO is empty, then just
+ * write the data to DCC.
+ */
+ len = hvc_dcc_put_chars(vt, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
+{
+ int len;
+ unsigned long irqflags;
+
+ if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+ return hvc_dcc_get_chars(vt, buf, count);
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+ len = kfifo_out(&inbuf, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ /*
+ * If the FIFO was empty, there may be characters in the DCC
+ * that we haven't read yet. Schedule a workqueue to fill
+ * the input FIFO, so that the next time this function is
+ * called, we'll have data. CPU hotplug is disabled in dcc_init
+ * so CPU0 cannot be offlined after the cpu online check.
+ */
+ if (!len && cpu_online(0))
+ schedule_work_on(0, &dcc_gwork);
+
+ return len;
+ }
+
+ /*
+ * If we're already on core 0, and the FIFO is empty, then just
+ * read the data from DCC.
+ */
+ len = hvc_dcc_get_chars(vt, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ return len;
+}
+
static const struct hv_ops hvc_dcc_get_put_ops = {
- .get_chars = hvc_dcc_get_chars,
- .put_chars = hvc_dcc_put_chars,
+ .get_chars = hvc_dcc0_get_chars,
+ .put_chars = hvc_dcc0_put_chars,
};
static int __init hvc_dcc_console_init(void)
@@ -108,6 +274,26 @@ static int __init hvc_dcc_init(void)
if (!hvc_dcc_check())
return -ENODEV;
+ if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP)) {
+ pr_warn("\n");
+ pr_warn("********************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** HVC_DCC_SERIALIZE_SMP SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this is a DEBUG kernel and unsafe for **\n");
+ pr_warn("** production use and has important feature like CPU hotplug **\n");
+ pr_warn("** disabled. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("********************************************************************\n");
+
+ cpu_hotplug_disable();
+ }
+
p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
return PTR_ERR_OR_ZERO(p);
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 056ae21a5121..794c7b18aa06 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -13,12 +13,12 @@
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/hvconsole.h>
-#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
@@ -342,9 +342,9 @@ void __init hvc_opal_init_early(void)
* path, so we hard wire it
*/
opal = of_find_node_by_path("/ibm,opal/consoles");
- if (opal)
+ if (opal) {
pr_devel("hvc_opal: Found consoles in new location\n");
- if (!opal) {
+ } else {
opal = of_find_node_by_path("/ibm,opal");
if (opal)
pr_devel("hvc_opal: "
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 72b11aa7e0a6..736b230f5ec0 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -28,10 +28,10 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <linux/of.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
-#include <asm/prom.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index ebaf7500f48f..7c23112dc923 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
if (r < 0 || v == 0)
goto err;
gfn = v;
- info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+ info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 245da1dfd818..9b7e8246a464 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -581,10 +581,9 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synch -- FIXME :js: it is not! */
- if(got)
+ if (got)
tty_flip_buffer_push(&hvcsd->port);
-
- if (!got) {
+ else {
/* Do this _after_ the flip_buffer_push */
spin_lock_irqsave(&hvcsd->lock, flags);
vio_enable_interrupts(hvcsd->vdev);
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index aa81f4835fef..a200d01eceed 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -26,13 +26,13 @@
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
-#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 6ebd3e4ed859..70b982b2c6b2 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -528,7 +528,6 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
-#ifdef BOTHER
if (C_BAUD(tty) == BOTHER) {
quot = MXSER_BAUD_BASE % newspd;
quot *= 8;
@@ -539,9 +538,9 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
quot /= newspd;
mxser_set_must_enum_value(info->ioaddr, quot);
- } else
-#endif
+ } else {
mxser_set_must_enum_value(info->ioaddr, 0);
+ }
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd8b86dde525..137eebdcfda9 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -444,6 +444,25 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
return modembits;
}
+static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
+ unsigned long len)
+{
+ char *prefix;
+
+ if (!fname) {
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
+ true);
+ return;
+ }
+
+ prefix = kasprintf(GFP_KERNEL, "%s: ", fname);
+ if (!prefix)
+ return;
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
+ true);
+ kfree(prefix);
+}
+
/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
@@ -508,7 +527,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
else
pr_cont("(F)");
- print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
+ gsm_hex_dump_bytes(NULL, data, dlen);
}
@@ -698,9 +717,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
}
if (debug & 4)
- print_hex_dump_bytes("gsm_data_kick: ",
- DUMP_PREFIX_OFFSET,
- gsm->txframe, len);
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
if (gsmld_output(gsm, gsm->txframe, len) <= 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
@@ -749,7 +766,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
*--dp = msg->ctrl;
if (gsm->initiator)
- *--dp = (msg->addr << 2) | 2 | EA;
+ *--dp = (msg->addr << 2) | CR | EA;
else
*--dp = (msg->addr << 2) | EA;
*fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
@@ -1907,10 +1924,6 @@ static void gsm_queue(struct gsm_mux *gsm)
case UI|PF:
case UIH:
case UIH|PF:
-#if 0
- if (cr)
- goto invalid;
-#endif
if (dlci == NULL || dlci->state != DLCI_OPEN) {
gsm_command(gsm, address, DM|PF);
return;
@@ -2448,8 +2461,7 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return -ENOSPC;
}
if (debug & 4)
- print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
- data, len);
+ gsm_hex_dump_bytes(__func__, data, len);
return gsm->tty->ops->write(gsm->tty, data, len);
}
@@ -2525,8 +2537,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char flags = TTY_NORMAL;
if (debug & 4)
- print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
- cp, count);
+ gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
if (fp)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index efc72104c840..640c9e871044 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1220,21 +1220,34 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
process_echoes(tty);
}
+static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+ return c == START_CHAR(tty) || c == STOP_CHAR(tty);
+}
+
+/* Returns true if c is consumed as flow-control character */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+ if (!n_tty_is_char_flow_ctrl(tty, c))
+ return false;
+
+ if (c == START_CHAR(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ return true;
+ }
+
+ /* STOP_CHAR */
+ stop_tty(tty);
+ return true;
+}
+
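For context, a minimal userspace sketch (illustration only) of the setting this
refactored path reacts to: with IXON set, the characters configured in
VSTART/VSTOP (conventionally XON/XOFF) are consumed by the line discipline
exactly as in the helper above.

#include <termios.h>
#include <unistd.h>

static int enable_ixon(int fd)
{
        struct termios t;

        if (tcgetattr(fd, &t))
                return -1;

        t.c_iflag |= IXON;              /* honour incoming START/STOP characters */
        t.c_cc[VSTART] = 0x11;          /* XON  (Ctrl-Q) */
        t.c_cc[VSTOP]  = 0x13;          /* XOFF (Ctrl-S) */

        return tcsetattr(fd, TCSANOW, &t);
}

int main(void)
{
        return enable_ixon(STDIN_FILENO) ? 1 : 0;
}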
static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty)) {
- if (c == START_CHAR(tty)) {
- start_tty(tty);
- process_echoes(tty);
- return;
- }
- if (c == STOP_CHAR(tty)) {
- stop_tty(tty);
- return;
- }
- }
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ return;
if (L_ISIG(tty)) {
if (c == INTR_CHAR(tty)) {
@@ -1975,6 +1988,35 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return ldata->read_tail != canon_head;
}
+/*
+ * If we finished a read at the exact location of an
+ * EOF (special EOL character that's a __DISABLED_CHAR)
+ * in the stream, silently eat the EOF.
+ */
+static void canon_skip_eof(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t tail, canon_head;
+
+ canon_head = smp_load_acquire(&ldata->canon_head);
+ tail = ldata->read_tail;
+
+ // No data?
+ if (tail == canon_head)
+ return;
+
+ // See if the tail position is EOF in the circular buffer
+ tail &= (N_TTY_BUF_SIZE - 1);
+ if (!test_bit(tail, ldata->read_flags))
+ return;
+ if (read_buf(ldata, tail) != __DISABLED_CHAR)
+ return;
+
+ // Clear the EOL bit, skip the EOF char.
+ clear_bit(tail, ldata->read_flags);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
+}
+
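One way to see the intent of canon_skip_eof() from userspace (a rough
illustration, not part of the patch): in canonical mode, type "abcd" followed
by Ctrl-D. The first read() fills the four-byte buffer exactly; without the
skip, the leftover EOF marker would make the second read() return 0 (a
spurious end-of-file) instead of waiting for further input.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4];
        ssize_t n1, n2;

        n1 = read(STDIN_FILENO, buf, sizeof(buf));      /* type "abcd" then Ctrl-D */
        n2 = read(STDIN_FILENO, buf, sizeof(buf));      /* should block, not return 0 */
        printf("first read: %zd, second read: %zd\n", n1, n2);
        return 0;
}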
/**
* job_control - check job control
* @tty: tty
@@ -2045,7 +2087,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
*/
if (*cookie) {
if (ldata->icanon && !L_EXTPROC(tty)) {
- if (canon_copy_from_read_buf(tty, &kb, &nr))
+ /*
+ * If we have filled the user buffer, see
+ * if we should skip an EOF character before
+ * releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(tty);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
if (copy_from_read_buf(tty, &kb, &nr))
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index a8830e15a22c..696030cfcb09 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -17,6 +17,8 @@
struct uart_8250_dma {
int (*tx_dma)(struct uart_8250_port *p);
int (*rx_dma)(struct uart_8250_port *p);
+ void (*prepare_tx_dma)(struct uart_8250_port *p);
+ void (*prepare_rx_dma)(struct uart_8250_port *p);
/* Filter function */
dma_filter_fn fn;
@@ -83,6 +85,7 @@ struct serial8250_config {
#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks:
* STOP PARITY EPAR SPAR WLEN5 WLEN6
*/
+#define UART_CAP_NOTEMT BIT(18) /* UART without interrupt on TEMT available */
#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */
@@ -120,6 +123,28 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up,
+ int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
static inline int serial_dl_read(struct uart_8250_port *up)
@@ -302,6 +327,22 @@ extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
extern void serial8250_release_dma(struct uart_8250_port *);
+
+static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (dma->prepare_tx_dma)
+ dma->prepare_tx_dma(p);
+}
+
+static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (dma->prepare_rx_dma)
+ dma->prepare_rx_dma(p);
+}
#else
static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 93fe10c680fb..9d2a7856784f 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -429,6 +429,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
memset(&port, 0, sizeof(port));
port.port.private_data = vuart;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 01d30f6ed8fb..cfbd2de0ca6e 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -32,7 +32,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/pm_runtime.h>
#include <linux/io.h>
#ifdef CONFIG_SPARC
#include <linux/sunserialcore.h>
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index b3c3f7e5851a..7133fceed35e 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -34,7 +34,7 @@ static void __dma_tx_complete(void *param)
uart_write_wakeup(&p->port);
ret = serial8250_tx_dma(p);
- if (ret)
+ if (ret || !dma->tx_running)
serial8250_set_THRI(p);
spin_unlock_irqrestore(&p->port.lock, flags);
@@ -80,12 +80,13 @@ int serial8250_tx_dma(struct uart_8250_port *p)
if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
/* We have been called from __dma_tx_complete() */
- serial8250_rpm_put_tx(p);
return 0;
}
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ serial8250_do_prepare_tx_dma(p);
+
desc = dmaengine_prep_slave_single(dma->txchan,
dma->tx_addr + xmit->tail,
dma->tx_size, DMA_MEM_TO_DEV,
@@ -123,6 +124,8 @@ int serial8250_rx_dma(struct uart_8250_port *p)
if (dma->rx_running)
return 0;
+ serial8250_do_prepare_rx_dma(p);
+
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
dma->rx_size, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1769808031c5..f57bbd32ef11 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -12,13 +12,13 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/slab.h>
@@ -33,30 +33,28 @@
/* Offsets for the DesignWare specific registers */
#define DW_UART_USR 0x1f /* UART Status Register */
+#define DW_UART_DMASA 0xa8 /* DMA Software Ack */
+
+#define OCTEON_UART_USR 0x27 /* UART Status Register */
+
+#define RZN1_UART_TDMACR 0x10c /* DMA Control Register Transmit Mode */
+#define RZN1_UART_RDMACR 0x110 /* DMA Control Register Receive Mode */
/* DesignWare specific register fields */
#define DW_UART_MCR_SIRE BIT(6)
-struct dw8250_data {
- struct dw8250_port_data data;
+/* Renesas specific register fields */
+#define RZN1_UART_xDMACR_DMA_EN BIT(0)
+#define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1)
+#define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST (3 << 1)
+#define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3)
- u8 usr_reg;
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
+/* Quirks */
+#define DW_UART_QUIRK_OCTEON BIT(0)
+#define DW_UART_QUIRK_ARMADA_38X BIT(1)
+#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
+#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -238,6 +236,8 @@ static int dw8250_handle_irq(struct uart_port *p)
struct uart_8250_port *up = up_to_u8250p(p);
struct dw8250_data *d = to_dw8250_data(p->private_data);
unsigned int iir = p->serial_in(p, UART_IIR);
+ bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT;
+ unsigned int quirks = d->pdata->quirks;
unsigned int status;
unsigned long flags;
@@ -251,7 +251,7 @@ static int dw8250_handle_irq(struct uart_port *p)
* This problem has only been observed so far when not in DMA mode
* so we limit the workaround only to non-DMA mode.
*/
- if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) {
+ if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
status = p->serial_in(p, UART_LSR);
@@ -261,12 +261,21 @@ static int dw8250_handle_irq(struct uart_port *p)
spin_unlock_irqrestore(&p->lock, flags);
}
+ /* Manually stop the Rx DMA transfer when acting as flow controller */
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
+ status = p->serial_in(p, UART_LSR);
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
+ dw8250_writel_ext(p, DW_UART_DMASA, 1);
+ }
+ }
+
if (serial8250_handle_irq(p, iir))
return 1;
if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR */
- (void)p->serial_in(p, d->usr_reg);
+ (void)p->serial_in(p, d->pdata->usr_reg);
return 1;
}
@@ -384,11 +393,48 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev;
}
+static u32 dw8250_rzn1_get_dmacr_burst(int max_burst)
+{
+ if (max_burst >= 8)
+ return RZN1_UART_xDMACR_8_WORD_BURST;
+ else if (max_burst >= 4)
+ return RZN1_UART_xDMACR_4_WORD_BURST;
+ else
+ return RZN1_UART_xDMACR_1_WORD_BURST;
+}
+
+static void dw8250_prepare_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_port *up = &p->port;
+ struct uart_8250_dma *dma = p->dma;
+ u32 val;
+
+ dw8250_writel_ext(up, RZN1_UART_TDMACR, 0);
+ val = dw8250_rzn1_get_dmacr_burst(dma->txconf.dst_maxburst) |
+ RZN1_UART_xDMACR_BLK_SZ(dma->tx_size) |
+ RZN1_UART_xDMACR_DMA_EN;
+ dw8250_writel_ext(up, RZN1_UART_TDMACR, val);
+}
+
+static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
+{
+ struct uart_port *up = &p->port;
+ struct uart_8250_dma *dma = p->dma;
+ u32 val;
+
+ dw8250_writel_ext(up, RZN1_UART_RDMACR, 0);
+ val = dw8250_rzn1_get_dmacr_burst(dma->rxconf.src_maxburst) |
+ RZN1_UART_xDMACR_BLK_SZ(dma->rx_size) |
+ RZN1_UART_xDMACR_DMA_EN;
+ dw8250_writel_ext(up, RZN1_UART_RDMACR, val);
+}
+
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
struct device_node *np = p->dev->of_node;
if (np) {
+ unsigned int quirks = data->pdata->quirks;
int id;
/* get index of serial line, if found in DT aliases */
@@ -396,12 +442,11 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
if (id >= 0)
p->line = id;
#ifdef CONFIG_64BIT
- if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) {
+ if (quirks & DW_UART_QUIRK_OCTEON) {
p->serial_in = dw8250_serial_inq;
p->serial_out = dw8250_serial_outq;
p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
p->type = PORT_OCTEON;
- data->usr_reg = 0x27;
data->skip_autocfg = true;
}
#endif
@@ -412,10 +457,16 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p->serial_out = dw8250_serial_out32be;
}
- if (of_device_is_compatible(np, "marvell,armada-38x-uart"))
+ if (quirks & DW_UART_QUIRK_ARMADA_38X)
p->serial_out = dw8250_serial_out38x;
- if (of_device_is_compatible(np, "starfive,jh7100-uart"))
+ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
p->set_termios = dw8250_do_set_termios;
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+ data->data.dma.txconf.device_fc = 1;
+ data->data.dma.rxconf.device_fc = 1;
+ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+ }
} else if (acpi_dev_present("APMC0D08", NULL, -1)) {
p->iotype = UPIO_MEM32;
@@ -433,21 +484,30 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
}
}
+static void dw8250_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void dw8250_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {}, *up = &uart;
- struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
+ struct resource *regs;
int irq;
int err;
u32 val;
- if (!regs) {
- dev_err(dev, "no registers defined\n");
- return -EINVAL;
- }
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL, "no registers defined\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -476,7 +536,7 @@ static int dw8250_probe(struct platform_device *pdev)
return -ENOMEM;
data->data.dma.fn = dw8250_fallback_dma_filter;
- data->usr_reg = DW_UART_USR;
+ data->pdata = device_get_match_data(p->dev);
p->private_data = &data->data;
data->uart_16550_compatible = device_property_read_bool(dev,
@@ -532,37 +592,41 @@ static int dw8250_probe(struct platform_device *pdev)
err = clk_prepare_enable(data->clk);
if (err)
- dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+ return dev_err_probe(dev, err, "could not enable optional baudclk\n");
+
+ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk);
+ if (err)
+ return err;
if (data->clk)
p->uartclk = clk_get_rate(data->clk);
/* If no clock rate is defined, fail. */
- if (!p->uartclk) {
- dev_err(dev, "clock rate not defined\n");
- err = -EINVAL;
- goto err_clk;
- }
+ if (!p->uartclk)
+ return dev_err_probe(dev, -EINVAL, "clock rate not defined\n");
data->pclk = devm_clk_get_optional(dev, "apb_pclk");
- if (IS_ERR(data->pclk)) {
- err = PTR_ERR(data->pclk);
- goto err_clk;
- }
+ if (IS_ERR(data->pclk))
+ return PTR_ERR(data->pclk);
err = clk_prepare_enable(data->pclk);
- if (err) {
- dev_err(dev, "could not enable apb_pclk\n");
- goto err_clk;
- }
+ if (err)
+ return dev_err_probe(dev, err, "could not enable apb_pclk\n");
+
+ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk);
+ if (err)
+ return err;
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
- if (IS_ERR(data->rst)) {
- err = PTR_ERR(data->rst);
- goto err_pclk;
- }
+ if (IS_ERR(data->rst))
+ return PTR_ERR(data->rst);
+
reset_control_deassert(data->rst);
+ err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
+ if (err)
+ return err;
+
dw8250_quirks(p, data);
/* If the Busy Functionality is not implemented, don't handle it */
@@ -580,10 +644,8 @@ static int dw8250_probe(struct platform_device *pdev)
}
data->data.line = serial8250_register_8250_port(up);
- if (data->data.line < 0) {
- err = data->data.line;
- goto err_reset;
- }
+ if (data->data.line < 0)
+ return data->data.line;
/*
* Some platforms may provide a reference clock shared between several
@@ -593,9 +655,8 @@ static int dw8250_probe(struct platform_device *pdev)
if (data->clk) {
err = clk_notifier_register(data->clk, &data->clk_notifier);
if (err)
- dev_warn(p->dev, "Failed to set the clock notifier\n");
- else
- queue_work(system_unbound_wq, &data->clk_work);
+ return dev_err_probe(dev, err, "Failed to set the clock notifier\n");
+ queue_work(system_unbound_wq, &data->clk_work);
}
platform_set_drvdata(pdev, data);
@@ -604,17 +665,6 @@ static int dw8250_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
-
-err_reset:
- reset_control_assert(data->rst);
-
-err_pclk:
- clk_disable_unprepare(data->pclk);
-
-err_clk:
- clk_disable_unprepare(data->clk);
-
- return err;
}
static int dw8250_remove(struct platform_device *pdev)
@@ -632,12 +682,6 @@ static int dw8250_remove(struct platform_device *pdev)
serial8250_unregister_port(data->data.line);
- reset_control_assert(data->rst);
-
- clk_disable_unprepare(data->pclk);
-
- clk_disable_unprepare(data->clk);
-
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@@ -693,12 +737,37 @@ static const struct dev_pm_ops dw8250_pm_ops = {
SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
+static const struct dw8250_platform_data dw8250_dw_apb = {
+ .usr_reg = DW_UART_USR,
+};
+
+static const struct dw8250_platform_data dw8250_octeon_3860_data = {
+ .usr_reg = OCTEON_UART_USR,
+ .quirks = DW_UART_QUIRK_OCTEON,
+};
+
+static const struct dw8250_platform_data dw8250_armada_38x_data = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_ARMADA_38X,
+};
+
+static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+ .usr_reg = DW_UART_USR,
+ .cpr_val = 0x00012f32,
+ .quirks = DW_UART_QUIRK_IS_DMA_FC,
+};
+
+static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_SKIP_SET_RATE,
+};
+
static const struct of_device_id dw8250_of_match[] = {
- { .compatible = "snps,dw-apb-uart" },
- { .compatible = "cavium,octeon-3860-uart" },
- { .compatible = "marvell,armada-38x-uart" },
- { .compatible = "renesas,rzn1-uart" },
- { .compatible = "starfive,jh7100-uart" },
+ { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb },
+ { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data },
+ { .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data },
+ { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data },
+ { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 622d3b0d89e7..fbabfdd8c7b8 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -2,19 +2,32 @@
/* Synopsys DesignWare 8250 library. */
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
-#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
+#define DW_UART_TCR 0xac /* Transceiver Control Register (RS485) */
+#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
+#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Transceiver Control Register bits */
+#define DW_UART_TCR_RS485_EN BIT(0)
+#define DW_UART_TCR_RE_POL BIT(1)
+#define DW_UART_TCR_DE_POL BIT(2)
+#define DW_UART_TCR_XFER_MODE GENMASK(4, 3)
+#define DW_UART_TCR_XFER_MODE_DE_DURING_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 0)
+#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
+#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+
/* Component Parameter Register bits */
#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
#define DW_UART_CPR_AFCE_MODE (1 << 4)
@@ -32,21 +45,6 @@
/* Helper for FIFO size calculation */
#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
-static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
-{
- if (p->iotype == UPIO_MEM32BE)
- return ioread32be(p->membase + offset);
- return readl(p->membase + offset);
-}
-
-static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
-{
- if (p->iotype == UPIO_MEM32BE)
- iowrite32be(reg, p->membase + offset);
- else
- writel(reg, p->membase + offset);
-}
-
/*
* divisor = div(I) + div(F)
* "I" means integer, "F" means fractional
@@ -87,11 +85,87 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
+static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+{
+ u32 tcr;
+
+ tcr = dw8250_readl_ext(p, DW_UART_TCR);
+ tcr &= ~DW_UART_TCR_XFER_MODE;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Clear unsupported flags. */
+ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
+ SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
+ tcr |= DW_UART_TCR_RS485_EN;
+
+ if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
+ } else {
+ /* HW does not support same DE level for tx and rx */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
+
+ tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
+ }
+ dw8250_writel_ext(p, DW_UART_DE_EN, 1);
+ dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+ } else {
+ rs485->flags = 0;
+
+ tcr &= ~DW_UART_TCR_RS485_EN;
+ }
+
+ /* Reset to default polarity */
+ tcr |= DW_UART_TCR_DE_POL;
+ tcr &= ~DW_UART_TCR_RE_POL;
+
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND))
+ tcr &= ~DW_UART_TCR_DE_POL;
+ if (device_property_read_bool(p->dev, "rs485-rx-active-high"))
+ tcr |= DW_UART_TCR_RE_POL;
+
+ dw8250_writel_ext(p, DW_UART_TCR, tcr);
+
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+
+ p->rs485 = *rs485;
+
+ return 0;
+}
+
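For reference, the userspace side of this configuration goes through the
standard TIOCSRS485 ioctl and struct serial_rs485; a minimal sketch
(illustration only, the device path is hypothetical):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY); /* hypothetical port */

        if (fd < 0)
                return 1;

        memset(&rs485, 0, sizeof(rs485));
        /* The driver rejects RTS_ON_SEND == RTS_AFTER_SEND, as noted above. */
        rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;

        if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
                close(fd);
                return 1;
        }

        close(fd);
        return 0;
}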
+/*
+ * Tests whether the RE_EN register can hold a non-zero value to see if RS-485
+ * HW support is present.
+ */
+static bool dw8250_detect_rs485_hw(struct uart_port *p)
+{
+ u32 reg;
+
+ dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+ reg = dw8250_readl_ext(p, DW_UART_RE_EN);
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ return reg;
+}
+
void dw8250_setup_port(struct uart_port *p)
{
+ struct dw8250_port_data *pd = p->private_data;
+ struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg;
+ pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
+ if (pd->hw_rs485_support) {
+ p->rs485_config = dw8250_rs485_config;
+ } else {
+ p->rs485_config = serial8250_em485_config;
+ up->rs485_start_tx = serial8250_em485_start_tx;
+ up->rs485_stop_tx = serial8250_em485_stop_tx;
+ }
+ up->capabilities |= UART_CAP_NOTEMT;
+
/*
* If the Component Version Register returns zero, we know that
* ADDITIONAL_FEATURES are not enabled. No need to go any further.
@@ -108,14 +182,16 @@ void dw8250_setup_port(struct uart_port *p)
dw8250_writel_ext(p, DW_UART_DLF, 0);
if (reg) {
- struct dw8250_port_data *d = p->private_data;
-
- d->dlf_size = fls(reg);
+ pd->dlf_size = fls(reg);
p->get_divisor = dw8250_get_divisor;
p->set_divisor = dw8250_set_divisor;
}
reg = dw8250_readl_ext(p, DW_UART_CPR);
+ if (!reg) {
+ reg = data->pdata->cpr_val;
+ dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
+ }
if (!reg)
return;
@@ -124,7 +200,7 @@ void dw8250_setup_port(struct uart_port *p)
p->type = PORT_16550A;
p->flags |= UPF_FIXED_TYPE;
p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
- up->capabilities = UART_CAP_FIFO;
+ up->capabilities = UART_CAP_FIFO | UART_CAP_NOTEMT;
}
if (reg & DW_UART_CPR_AFCE_MODE)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index 83d528e5cc21..055bfdc87985 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -1,10 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Synopsys DesignWare 8250 library header file. */
+#include <linux/io.h>
+#include <linux/notifier.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "8250.h"
+struct clk;
+struct reset_control;
+
struct dw8250_port_data {
/* Port properties */
int line;
@@ -14,7 +20,52 @@ struct dw8250_port_data {
/* Hardware configuration */
u8 dlf_size;
+
+ /* RS485 variables */
+ bool hw_rs485_support;
+};
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_val;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
};
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
+
+static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ return ioread32be(p->membase + offset);
+ return readl(p->membase + offset);
+}
+
+static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ iowrite32be(reg, p->membase + offset);
+ else
+ writel(reg, p->membase + offset);
+}
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 251f0018ae8c..dba5950b8d0e 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -200,12 +200,12 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!pdata)
return -EINVAL;
- /* Hardware do not support same RTS level on send and receive */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
if (rs485->flags & SER_RS485_ENABLED) {
+ /* Hardware does not support the same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
} else {
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 21053db93ff1..54051ec7b499 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -54,9 +54,6 @@
#define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
-#define MTK_UART_FEATURE_SEL 39 /* Feature Selection register */
-#define MTK_UART_FEAT_NEWRMAP BIT(0) /* Use new register map */
-
#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
@@ -575,10 +572,6 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.dma = data->dma;
#endif
- /* Set AP UART new register map */
- writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
- (MTK_UART_FEATURE_SEL << uart.port.regshift));
-
/* Disable Rate Fix function */
writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift));
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index be8626234627..5a699a1aa79c 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -326,6 +326,8 @@ static const struct of_device_id of_platform_serial_table[] = {
.data = (void *)PORT_ALTR_16550_F64, },
{ .compatible = "altr,16550-FIFO128",
.data = (void *)PORT_ALTR_16550_F128, },
+ { .compatible = "fsl,16550-FIFO64",
+ .data = (void *)PORT_16550A_FSL64, },
{ .compatible = "mediatek,mtk-btif",
.data = (void *)PORT_MTK_BTIF, },
{ .compatible = "mrvl,mmp-uart",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a293e9f107d0..a17619db7939 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/tty.h>
@@ -994,41 +995,29 @@ static void pci_ite887x_exit(struct pci_dev *dev)
}
/*
- * EndRun Technologies.
- * Determine the number of ports available on the device.
+ * Oxford Semiconductor Inc.
+ * Check if an OxSemi device is part of the Tornado range of devices.
*/
#define PCI_VENDOR_ID_ENDRUN 0x7401
#define PCI_DEVICE_ID_ENDRUN_1588 0xe100
-static int pci_endrun_init(struct pci_dev *dev)
+static bool pci_oxsemi_tornado_p(struct pci_dev *dev)
{
- u8 __iomem *p;
- unsigned long deviceID;
- unsigned int number_uarts = 0;
+ /* OxSemi Tornado devices are all 0xCxxx */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+ (dev->device & 0xf000) != 0xc000)
+ return false;
- /* EndRun device is all 0xexxx */
+ /* EndRun devices are all 0xExxx */
if (dev->vendor == PCI_VENDOR_ID_ENDRUN &&
- (dev->device & 0xf000) != 0xe000)
- return 0;
-
- p = pci_iomap(dev, 0, 5);
- if (p == NULL)
- return -ENOMEM;
+ (dev->device & 0xf000) != 0xe000)
+ return false;
- deviceID = ioread32(p);
- /* EndRun device */
- if (deviceID == 0x07000200) {
- number_uarts = ioread8(p + 4);
- pci_dbg(dev, "%d ports detected on EndRun PCI Express device\n", number_uarts);
- }
- pci_iounmap(dev, p);
- return number_uarts;
+ return true;
}
/*
- * Oxford Semiconductor Inc.
- * Check that device is part of the Tornado range of devices, then determine
- * the number of ports available on the device.
+ * Determine the number of ports available on a Tornado device.
*/
static int pci_oxsemi_tornado_init(struct pci_dev *dev)
{
@@ -1036,9 +1025,7 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
unsigned long deviceID;
unsigned int number_uarts = 0;
- /* OxSemi Tornado devices are all 0xCxxx */
- if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
- (dev->device & 0xF000) != 0xC000)
+ if (!pci_oxsemi_tornado_p(dev))
return 0;
p = pci_iomap(dev, 0, 5);
@@ -1049,12 +1036,217 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
/* Tornado device */
if (deviceID == 0x07000200) {
number_uarts = ioread8(p + 4);
- pci_dbg(dev, "%d ports detected on Oxford PCI Express device\n", number_uarts);
+ pci_dbg(dev, "%d ports detected on %s PCI Express device\n",
+ number_uarts,
+ dev->vendor == PCI_VENDOR_ID_ENDRUN ?
+ "EndRun" : "Oxford");
}
pci_iounmap(dev, p);
return number_uarts;
}
+/* Tornado-specific constants for the TCR and CPR registers; see below. */
+#define OXSEMI_TORNADO_TCR_MASK 0xf
+#define OXSEMI_TORNADO_CPR_MASK 0x1ff
+#define OXSEMI_TORNADO_CPR_MIN 0x008
+#define OXSEMI_TORNADO_CPR_DEF 0x10f
+
+/*
+ * Determine the oversampling rate, the clock prescaler, and the clock
+ * divisor for the requested baud rate. The clock rate is 62.5 MHz,
+ * which is four times the baud base, and the prescaler increments in
+ * steps of 1/8. Therefore to make calculations on integers we need
+ * to use a scaled clock rate, which is the baud base multiplied by 32
+ * (or our assumed UART clock rate multiplied by 2).
+ *
+ * The allowed oversampling rates are from 4 up to 16 inclusive (values
+ * from 0 to 3 inclusive map to 16). Likewise the clock prescaler allows
+ * values between 1.000 and 63.875 inclusive (operation for values from
+ * 0.000 to 0.875 has not been specified). The clock divisor is the usual
+ * unsigned 16-bit integer.
+ *
+ * For the most accurate baud rate we use a table of predetermined
+ * oversampling rates and clock prescalers that records all possible
+ * products of the two parameters in the range from 4 up to 255 inclusive,
+ * and additionally 335 for the 1500000bps rate, with the prescaler scaled
+ * by 8. The table is sorted by the decreasing value of the oversampling
+ * rate and ties are resolved by sorting by the decreasing value of the
+ * product. This way preference is given to higher oversampling rates.
+ *
+ * We iterate over the table and choose the product of an oversampling
+ * rate and a clock prescaler that gives the lowest integer division
+ * result deviation, or if an exact integer divider is found we stop
+ * looking for it right away. We do some fixup if the resulting clock
+ * divisor required would be out of its unsigned 16-bit integer range.
+ *
+ * Finally we abuse the supposed fractional part returned to encode the
+ * 4-bit value of the oversampling rate and the 9-bit value of the clock
+ * prescaler which will end up in the TCR and CPR/CPR2 registers.
+ */
+static unsigned int pci_oxsemi_tornado_get_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ static u8 p[][2] = {
+ { 16, 14, }, { 16, 13, }, { 16, 12, }, { 16, 11, },
+ { 16, 10, }, { 16, 9, }, { 16, 8, }, { 15, 17, },
+ { 15, 16, }, { 15, 15, }, { 15, 14, }, { 15, 13, },
+ { 15, 12, }, { 15, 11, }, { 15, 10, }, { 15, 9, },
+ { 15, 8, }, { 14, 18, }, { 14, 17, }, { 14, 14, },
+ { 14, 13, }, { 14, 12, }, { 14, 11, }, { 14, 10, },
+ { 14, 9, }, { 14, 8, }, { 13, 19, }, { 13, 18, },
+ { 13, 17, }, { 13, 13, }, { 13, 12, }, { 13, 11, },
+ { 13, 10, }, { 13, 9, }, { 13, 8, }, { 12, 19, },
+ { 12, 18, }, { 12, 17, }, { 12, 11, }, { 12, 9, },
+ { 12, 8, }, { 11, 23, }, { 11, 22, }, { 11, 21, },
+ { 11, 20, }, { 11, 19, }, { 11, 18, }, { 11, 17, },
+ { 11, 11, }, { 11, 10, }, { 11, 9, }, { 11, 8, },
+ { 10, 25, }, { 10, 23, }, { 10, 20, }, { 10, 19, },
+ { 10, 17, }, { 10, 10, }, { 10, 9, }, { 10, 8, },
+ { 9, 27, }, { 9, 23, }, { 9, 21, }, { 9, 19, },
+ { 9, 18, }, { 9, 17, }, { 9, 9, }, { 9, 8, },
+ { 8, 31, }, { 8, 29, }, { 8, 23, }, { 8, 19, },
+ { 8, 17, }, { 8, 8, }, { 7, 35, }, { 7, 31, },
+ { 7, 29, }, { 7, 25, }, { 7, 23, }, { 7, 21, },
+ { 7, 19, }, { 7, 17, }, { 7, 15, }, { 7, 14, },
+ { 7, 13, }, { 7, 12, }, { 7, 11, }, { 7, 10, },
+ { 7, 9, }, { 7, 8, }, { 6, 41, }, { 6, 37, },
+ { 6, 31, }, { 6, 29, }, { 6, 23, }, { 6, 19, },
+ { 6, 17, }, { 6, 13, }, { 6, 11, }, { 6, 10, },
+ { 6, 9, }, { 6, 8, }, { 5, 67, }, { 5, 47, },
+ { 5, 43, }, { 5, 41, }, { 5, 37, }, { 5, 31, },
+ { 5, 29, }, { 5, 25, }, { 5, 23, }, { 5, 19, },
+ { 5, 17, }, { 5, 15, }, { 5, 13, }, { 5, 11, },
+ { 5, 10, }, { 5, 9, }, { 5, 8, }, { 4, 61, },
+ { 4, 59, }, { 4, 53, }, { 4, 47, }, { 4, 43, },
+ { 4, 41, }, { 4, 37, }, { 4, 31, }, { 4, 29, },
+ { 4, 23, }, { 4, 19, }, { 4, 17, }, { 4, 13, },
+ { 4, 9, }, { 4, 8, },
+ };
+ /* Scale the quotient for comparison to get the fractional part. */
+ const unsigned int quot_scale = 65536;
+ unsigned int sclk = port->uartclk * 2;
+ unsigned int sdiv = DIV_ROUND_CLOSEST(sclk, baud);
+ unsigned int best_squot;
+ unsigned int squot;
+ unsigned int quot;
+ u16 cpr;
+ u8 tcr;
+ int i;
+
+ /* Old custom speed handling. */
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
+ unsigned int cust_div = port->custom_divisor;
+
+ quot = cust_div & UART_DIV_MAX;
+ tcr = (cust_div >> 16) & OXSEMI_TORNADO_TCR_MASK;
+ cpr = (cust_div >> 20) & OXSEMI_TORNADO_CPR_MASK;
+ if (cpr < OXSEMI_TORNADO_CPR_MIN)
+ cpr = OXSEMI_TORNADO_CPR_DEF;
+ } else {
+ best_squot = quot_scale;
+ for (i = 0; i < ARRAY_SIZE(p); i++) {
+ unsigned int spre;
+ unsigned int srem;
+ u8 cp;
+ u8 tc;
+
+ tc = p[i][0];
+ cp = p[i][1];
+ spre = tc * cp;
+
+ srem = sdiv % spre;
+ if (srem > spre / 2)
+ srem = spre - srem;
+ squot = DIV_ROUND_CLOSEST(srem * quot_scale, spre);
+
+ if (srem == 0) {
+ tcr = tc;
+ cpr = cp;
+ quot = sdiv / spre;
+ break;
+ } else if (squot < best_squot) {
+ best_squot = squot;
+ tcr = tc;
+ cpr = cp;
+ quot = DIV_ROUND_CLOSEST(sdiv, spre);
+ }
+ }
+ while (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1 &&
+ quot % 2 == 0) {
+ quot >>= 1;
+ tcr <<= 1;
+ }
+ while (quot > UART_DIV_MAX) {
+ if (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1) {
+ quot >>= 1;
+ tcr <<= 1;
+ } else if (cpr <= OXSEMI_TORNADO_CPR_MASK >> 1) {
+ quot >>= 1;
+ cpr <<= 1;
+ } else {
+ quot = quot * cpr / OXSEMI_TORNADO_CPR_MASK;
+ cpr = OXSEMI_TORNADO_CPR_MASK;
+ }
+ }
+ }
+
+ *frac = (cpr << 8) | (tcr & OXSEMI_TORNADO_TCR_MASK);
+ return quot;
+}
+
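+A rough worked example of the search above (not from the patch): with the
+15625000 baud base used by these boards the scaled clock is 32 * 15625000 =
+500 MHz, so for baud = 115200, sdiv = DIV_ROUND_CLOSEST(500000000, 115200) =
+4340. Walking the table, the first product tc * cp that divides 4340 exactly
+is 14 * 10 = 140, giving tcr = 14, cpr = 10 (a prescaler of 10/8 = 1.25) and
+quot = 31. The resulting rate is 62.5 MHz / (14 * 1.25 * 31), about 115207 bps
+(roughly 0.006% high), and *frac encodes (10 << 8) | 14 for the TCR/CPR writes
+done by pci_oxsemi_tornado_set_divisor() below.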
+/*
+ * Set the oversampling rate in the transmitter clock cycle register (TCR),
+ * the clock prescaler in the clock prescaler register (CPR and CPR2), and
+ * the clock divisor in the divisor latch (DLL and DLM). Note that for
+ * backwards compatibility any write to CPR clears CPR2 and therefore CPR
+ * has to be written first, followed by CPR2, which occupies the location
+ * of CKS used with earlier UART designs.
+ */
+static void pci_oxsemi_tornado_set_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int quot,
+ unsigned int quot_frac)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u8 cpr2 = quot_frac >> 16;
+ u8 cpr = quot_frac >> 8;
+ u8 tcr = quot_frac;
+
+ serial_icr_write(up, UART_TCR, tcr);
+ serial_icr_write(up, UART_CPR, cpr);
+ serial_icr_write(up, UART_CKS, cpr2);
+ serial8250_do_set_divisor(port, baud, quot, 0);
+}
+
+/*
+ * For Tornado devices we force MCR[7] set for the Divide-by-M N/8 baud rate
+ * generator prescaler (CPR and CPR2). Otherwise no prescaler would be used.
+ */
+static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port,
+ unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ up->mcr |= UART_MCR_CLKSEL;
+ serial8250_do_set_mctrl(port, mctrl);
+}
+
+static int pci_oxsemi_tornado_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *up, int idx)
+{
+ struct pci_dev *dev = priv->dev;
+
+ if (pci_oxsemi_tornado_p(dev)) {
+ up->port.get_divisor = pci_oxsemi_tornado_get_divisor;
+ up->port.set_divisor = pci_oxsemi_tornado_set_divisor;
+ up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl;
+ }
+
+ return pci_default_setup(priv, board, up, idx);
+}
+
static int pci_asix_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -2244,7 +2436,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .init = pci_endrun_init,
+ .init = pci_oxsemi_tornado_init,
.setup = pci_default_setup,
},
/*
@@ -2256,7 +2448,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_MAINPINE,
@@ -2264,7 +2456,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_DIGI,
@@ -2272,7 +2464,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_SUBVENDOR_ID_IBM,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
@@ -2589,7 +2781,7 @@ enum pci_board_num_t {
pbn_b0_2_1843200,
pbn_b0_4_1843200,
- pbn_b0_1_3906250,
+ pbn_b0_1_15625000,
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
@@ -2667,12 +2859,11 @@ enum pci_board_num_t {
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
- pbn_endrun_2_3906250,
pbn_oxsemi,
- pbn_oxsemi_1_3906250,
- pbn_oxsemi_2_3906250,
- pbn_oxsemi_4_3906250,
- pbn_oxsemi_8_3906250,
+ pbn_oxsemi_1_15625000,
+ pbn_oxsemi_2_15625000,
+ pbn_oxsemi_4_15625000,
+ pbn_oxsemi_8_15625000,
pbn_intel_i960,
pbn_sgi_ioc3,
pbn_computone_4,
@@ -2815,10 +3006,10 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
},
- [pbn_b0_1_3906250] = {
+ [pbn_b0_1_15625000] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 8,
},
@@ -3190,20 +3381,6 @@ static struct pciserial_board pci_boards[] = {
},
/*
- * EndRun Technologies
- * Uses the size of PCI Base region 0 to
- * signal now many ports are available
- * 2 port 952 Uart support
- */
- [pbn_endrun_2_3906250] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 3906250,
- .uart_offset = 0x200,
- .first_offset = 0x1000,
- },
-
- /*
* This board uses the size of PCI Base region 0 to
* signal how many ports are available
*/
@@ -3213,31 +3390,31 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
- [pbn_oxsemi_1_3906250] = {
+ [pbn_oxsemi_1_15625000] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_2_3906250] = {
+ [pbn_oxsemi_2_15625000] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_4_3906250] = {
+ [pbn_oxsemi_4_15625000] = {
.flags = FL_BASE0,
.num_ports = 4,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_8_3906250] = {
+ [pbn_oxsemi_8_15625000] = {
.flags = FL_BASE0,
.num_ports = 8,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
@@ -3518,6 +3695,12 @@ static struct pciserial_board pci_boards[] = {
},
};
+#define REPORT_CONFIG(option) \
+ (IS_ENABLED(CONFIG_##option) ? 0 : (kernel_ulong_t)&#option)
+#define REPORT_8250_CONFIG(option) \
+ (IS_ENABLED(CONFIG_SERIAL_8250_##option) ? \
+ 0 : (kernel_ulong_t)&"SERIAL_8250_"#option)
+
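The stringify trick above is compact but easy to misread; a small standalone
sketch of the same pattern (illustration only): when the option is compiled
out, driver_data carries the address of the option name, which
pciserial_init_one() later prints in its "ignoring port" warning.

#include <stdio.h>

#define REPORT_OPTION(enabled, option) \
        ((enabled) ? 0 : (unsigned long)&#option)

int main(void)
{
        unsigned long driver_data = REPORT_OPTION(0, PARPORT_SERIAL);

        if (driver_data)
                printf("ignoring port, enable %s to handle\n",
                       (const char *)driver_data);
        return 0;
}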
static const struct pci_device_id blacklist[] = {
/* softmodems */
{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
@@ -3525,40 +3708,43 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
/* multi-io cards handled by parport_serial */
- { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
- { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
- { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+ /* WCH CH353 2S1P */
+ { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ /* WCH CH353 1S1P */
+ { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ /* WCH CH382 2S1P */
+ { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
/* Intel platforms with MID UART */
- { PCI_VDEVICE(INTEL, 0x081b), },
- { PCI_VDEVICE(INTEL, 0x081c), },
- { PCI_VDEVICE(INTEL, 0x081d), },
- { PCI_VDEVICE(INTEL, 0x1191), },
- { PCI_VDEVICE(INTEL, 0x18d8), },
- { PCI_VDEVICE(INTEL, 0x19d8), },
+ { PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x081c), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x081d), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x1191), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x18d8), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x19d8), REPORT_8250_CONFIG(MID), },
/* Intel platforms with DesignWare UART */
- { PCI_VDEVICE(INTEL, 0x0936), },
- { PCI_VDEVICE(INTEL, 0x0f0a), },
- { PCI_VDEVICE(INTEL, 0x0f0c), },
- { PCI_VDEVICE(INTEL, 0x228a), },
- { PCI_VDEVICE(INTEL, 0x228c), },
- { PCI_VDEVICE(INTEL, 0x4b96), },
- { PCI_VDEVICE(INTEL, 0x4b97), },
- { PCI_VDEVICE(INTEL, 0x4b98), },
- { PCI_VDEVICE(INTEL, 0x4b99), },
- { PCI_VDEVICE(INTEL, 0x4b9a), },
- { PCI_VDEVICE(INTEL, 0x4b9b), },
- { PCI_VDEVICE(INTEL, 0x9ce3), },
- { PCI_VDEVICE(INTEL, 0x9ce4), },
+ { PCI_VDEVICE(INTEL, 0x0936), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x0f0a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x0f0c), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x228a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x228c), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b96), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b97), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b98), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b99), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b9a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b9b), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x9ce3), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x9ce4), REPORT_8250_CONFIG(LPSS), },
/* Exar devices */
- { PCI_VDEVICE(EXAR, PCI_ANY_ID), },
- { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+ { PCI_VDEVICE(EXAR, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
+ { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
/* Pericom devices */
- { PCI_VDEVICE(PERICOM, PCI_ANY_ID), },
- { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), },
+ { PCI_VDEVICE(PERICOM, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
+ { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
/* End of the black list */
{ }
@@ -3840,8 +4026,12 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
exclude = pci_match_id(blacklist, dev);
- if (exclude)
+ if (exclude) {
+ if (exclude->driver_data)
+ pci_warn(dev, "ignoring port, enable %s to handle\n",
+ (const char *)exclude->driver_data);
return -ENODEV;
+ }
rc = pcim_enable_device(dev);
pci_save_state(dev);
@@ -4110,13 +4300,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
/*
- * EndRun Technologies. PCI express device range.
- * EndRun PTP/1588 has 2 Native UARTs.
- */
- { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_endrun_2_3906250 },
- /*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
*
@@ -4225,158 +4408,165 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
/*
* Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
/*
* Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
+ /*
+ * EndRun Technologies. PCI express device range.
+ * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952.
+ */
+ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_15625000 },
/*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1fbd5bf264be..78b6dedc43e6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -263,7 +263,7 @@ static const struct serial8250_config uart_config[] = {
.tx_loadsz = 63,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
UART_FCR7_64BYTE,
- .flags = UART_CAP_FIFO,
+ .flags = UART_CAP_FIFO | UART_CAP_NOTEMT,
},
[PORT_RT2880] = {
.name = "Palmchip BK-3103",
@@ -538,27 +538,6 @@ serial_port_out_sync(struct uart_port *p, int offset, int value)
}
/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
- serial_out(up, UART_SCR, offset);
- serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-
-/*
* FIFO support.
*/
static void serial8250_clear_fifos(struct uart_8250_port *p)
@@ -1504,18 +1483,19 @@ static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
}
-static void __stop_tx_rs485(struct uart_8250_port *p)
+static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
{
struct uart_8250_em485 *em485 = p->em485;
+ stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC;
+
/*
* rs485_stop_tx() is going to set RTS according to config
* AND flush RX FIFO if required.
*/
- if (p->port.rs485.delay_rts_after_send > 0) {
+ if (stop_delay > 0) {
em485->active_timer = &em485->stop_tx_timer;
- start_hrtimer_ms(&em485->stop_tx_timer,
- p->port.rs485.delay_rts_after_send);
+ hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
} else {
p->rs485_stop_tx(p);
em485->active_timer = NULL;
@@ -1535,16 +1515,32 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
+ u64 stop_delay = 0;
+
+ if (!(lsr & UART_LSR_THRE))
+ return;
/*
* To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
- * shift register are empty. It is for device driver to enable
- * interrupt on TEMT.
+ * shift register are empty. The device driver should either
+ * enable an interrupt on TEMT or set UART_CAP_NOTEMT, which
+ * enlarges stop_tx_timer by the tx time of one frame to cover
+ * the emptying of the shift register.
*/
- if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
- return;
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (!(p->capabilities & UART_CAP_NOTEMT))
+ return;
+ /*
+ * RTS might get deasserted too early with the normal
+ * frame timing formula. This suggests that THRE may already
+ * be asserted during tx of the stop bit rather than after it
+ * has been fully sent.
+ * Roughly estimate one extra bit time here with / 7.
+ */
+ stop_delay = p->port.frame_time + DIV_ROUND_UP(p->port.frame_time, 7);
+ }
- __stop_tx_rs485(p);
+ __stop_tx_rs485(p, stop_delay);
}
__do_stop_tx(p);
}
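A minimal sketch of how the stop delay above is derived when the UART cannot signal TEMT (hypothetical helper for illustration only, not part of the patch; it assumes frame_time is the per-character transmission time in nanoseconds as maintained by the serial core):

static u64 rs485_stop_delay_ns(unsigned int frame_time_ns,
			       unsigned int delay_rts_after_send_ms)
{
	/* one full character frame plus roughly one extra bit (frame_time / 7) */
	u64 delay = frame_time_ns + DIV_ROUND_UP(frame_time_ns, 7);

	/* the user-configured RTS hold time is added on top */
	delay += (u64)delay_rts_after_send_ms * NSEC_PER_MSEC;

	return delay;
}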
@@ -1948,9 +1944,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
- (up->ier & UART_IER_THRI))
- serial8250_tx_chars(up);
+ if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
+ if (!up->dma || up->dma->tx_err)
+ serial8250_tx_chars(up);
+ else
+ __stop_tx(up);
+ }
uart_unlock_and_check_sysrq_irqrestore(port, flags);
@@ -2077,10 +2076,7 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
serial8250_rpm_put(up);
}
-/*
- * Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+static void wait_for_lsr(struct uart_8250_port *up, int bits)
{
unsigned int status, tmout = 10000;
@@ -2097,6 +2093,16 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
udelay(1);
touch_nmi_watchdog();
}
+}
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+ unsigned int tmout;
+
+ wait_for_lsr(up, bits);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -2614,10 +2620,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
}
if (!(c_cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
return cval;
}
@@ -3333,6 +3337,35 @@ static void serial8250_console_restore(struct uart_8250_port *up)
}
/*
+ * Print a string to the serial port using the device FIFO
+ *
+ * It writes up to fifosize bytes per iteration, waiting for
+ * the FIFO to drain (THRE) before each batch.
+ */
+static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ const char *s, unsigned int count)
+{
+ int i;
+ const char *end = s + count;
+ unsigned int fifosize = up->tx_loadsz;
+ bool cr_sent = false;
+
+ while (s != end) {
+ wait_for_lsr(up, UART_LSR_THRE);
+
+ for (i = 0; i < fifosize && s != end; ++i) {
+ if (*s == '\n' && !cr_sent) {
+ serial_out(up, UART_TX, '\r');
+ cr_sent = true;
+ } else {
+ serial_out(up, UART_TX, *s++);
+ cr_sent = false;
+ }
+ }
+ }
+}
+
+/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
@@ -3347,7 +3380,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
struct uart_8250_em485 *em485 = up->em485;
struct uart_port *port = &up->port;
unsigned long flags;
- unsigned int ier;
+ unsigned int ier, use_fifo;
int locked = 1;
touch_nmi_watchdog();
@@ -3379,7 +3412,30 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
mdelay(port->rs485.delay_rts_before_send);
}
- uart_console_write(port, s, count, serial8250_console_putchar);
+ use_fifo = (up->capabilities & UART_CAP_FIFO) &&
+ /*
+ * BCM283x requires checking the FIFO
+ * after each byte.
+ */
+ !(up->capabilities & UART_CAP_MINI) &&
+ /*
+ * tx_loadsz contains the transmit fifo size
+ */
+ up->tx_loadsz > 1 &&
+ (up->fcr & UART_FCR_ENABLE_FIFO) &&
+ port->state &&
+ test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) &&
+ /*
+ * After we put data in the FIFO, the controller will send
+ * it regardless of the CTS state. Therefore, only use the
+ * FIFO if we don't use flow control.
+ */
+ !(up->port.flags & UPF_CONS_FLOW);
+
+ if (likely(use_fifo))
+ serial8250_console_fifo_write(up, s, count);
+ else
+ uart_console_write(port, s, count, serial8250_console_putchar);
/*
* Finally, wait for transmitter to become empty
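The decision above to batch console output through the FIFO can be read as a single predicate; the following is an illustrative restatement of the conditions in serial8250_console_write, not a helper that exists in the driver:

static bool console_may_use_fifo(struct uart_8250_port *up)
{
	struct uart_port *port = &up->port;

	return (up->capabilities & UART_CAP_FIFO) &&
	       /* BCM283x (UART_CAP_MINI) must check the FIFO after each byte */
	       !(up->capabilities & UART_CAP_MINI) &&
	       /* tx_loadsz holds the transmit FIFO size */
	       up->tx_loadsz > 1 &&
	       (up->fcr & UART_FCR_ENABLE_FIFO) &&
	       port->state &&
	       test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) &&
	       /* once loaded, the FIFO is sent regardless of CTS */
	       !(port->flags & UPF_CONS_FLOW);
}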
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 33ca98bfa5b3..795e55142d4c 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include "8250.h"
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index cd93ea6eed65..fdb6c4188695 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -380,7 +380,7 @@ config SERIAL_8250_DW
config SERIAL_8250_EM
tristate "Support for Emma Mobile integrated serial port"
depends on SERIAL_8250 && HAVE_CLK
- depends on (ARM && ARCH_RENESAS) || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Selecting this option will add support for the integrated serial
port hardware found on the Emma Mobile line of processors.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index dbac90e2e209..a452748c69b2 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -782,7 +782,7 @@ config SERIAL_PMACZILOG_CONSOLE
config SERIAL_CPM
tristate "CPM SCC/SMC serial port support"
- depends on CPM2 || CPM1
+ depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
select SERIAL_CORE
help
This driver supports the SCC and SMC serial ports on Motorola
@@ -806,7 +806,7 @@ config SERIAL_CPM_CONSOLE
config SERIAL_PIC32
tristate "Microchip PIC32 serial support"
- depends on MACH_PIC32
+ depends on MACH_PIC32 || (MIPS && COMPILE_TEST)
select SERIAL_CORE
help
If you have a PIC32, this driver supports the serial ports.
@@ -817,7 +817,7 @@ config SERIAL_PIC32
config SERIAL_PIC32_CONSOLE
bool "PIC32 serial console support"
- depends on SERIAL_PIC32
+ depends on SERIAL_PIC32=y
select SERIAL_CORE_CONSOLE
help
If you have a PIC32, this driver supports putting a console on one
@@ -1246,7 +1246,7 @@ config SERIAL_XILINX_PS_UART_CONSOLE
config SERIAL_AR933X
tristate "AR933X serial port support"
- depends on HAVE_CLK && ATH79
+ depends on (HAVE_CLK && ATH79) || (MIPS && COMPILE_TEST)
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
help
@@ -1442,6 +1442,7 @@ config SERIAL_STM32_CONSOLE
bool "Support for console on STM32"
depends on SERIAL_STM32=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
config SERIAL_MVEBU_UART
bool "Marvell EBU serial port support"
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 1c16345d0a1f..cb791c5149a3 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -168,10 +168,8 @@ static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
}
}
- if (pending == 0) {
- pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
- writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
- }
+ if (pending == 0)
+ altera_jtaguart_stop_tx(port);
}
static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4d11a3e547f9..97ef41cb2721 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -42,8 +42,6 @@
#include <linux/io.h>
#include <linux/acpi.h>
-#include "amba-pl011.h"
-
#define UART_NR 14
#define SERIAL_AMBA_MAJOR 204
@@ -55,6 +53,36 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+enum {
+ REG_DR,
+ REG_ST_DMAWM,
+ REG_ST_TIMEOUT,
+ REG_FR,
+ REG_LCRH_RX,
+ REG_LCRH_TX,
+ REG_IBRD,
+ REG_FBRD,
+ REG_CR,
+ REG_IFLS,
+ REG_IMSC,
+ REG_RIS,
+ REG_MIS,
+ REG_ICR,
+ REG_DMACR,
+ REG_ST_XFCR,
+ REG_ST_XON1,
+ REG_ST_XON2,
+ REG_ST_XOFF1,
+ REG_ST_XOFF2,
+ REG_ST_ITCR,
+ REG_ST_ITIP,
+ REG_ST_ABCR,
+ REG_ST_ABIMSC,
+
+ /* The size of the array - must be last */
+ REG_ARRAY_SIZE,
+};
+
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
[REG_FR] = UART01x_FR,
@@ -2175,25 +2203,11 @@ static int pl011_rs485_config(struct uart_port *port,
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- /* pick sane settings if the user hasn't */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
- rs485->flags |= SER_RS485_RTS_ON_SEND;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
- memset(rs485->padding, 0, sizeof(rs485->padding));
-
if (port->rs485.flags & SER_RS485_ENABLED)
pl011_rs485_tx_stop(uap);
- /* Set new configuration */
- port->rs485 = *rs485;
-
/* Make sure auto RTS is disabled */
- if (port->rs485.flags & SER_RS485_ENABLED) {
+ if (rs485->flags & SER_RS485_ENABLED) {
u32 cr = pl011_read(uap, REG_CR);
cr &= ~UART011_CR_RTSEN;
diff --git a/drivers/tty/serial/amba-pl011.h b/drivers/tty/serial/amba-pl011.h
deleted file mode 100644
index 077eb12a3472..000000000000
--- a/drivers/tty/serial/amba-pl011.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AMBA_PL011_H
-#define AMBA_PL011_H
-
-enum {
- REG_DR,
- REG_ST_DMAWM,
- REG_ST_TIMEOUT,
- REG_FR,
- REG_LCRH_RX,
- REG_LCRH_TX,
- REG_IBRD,
- REG_FBRD,
- REG_CR,
- REG_IFLS,
- REG_IMSC,
- REG_RIS,
- REG_MIS,
- REG_ICR,
- REG_DMACR,
- REG_ST_XFCR,
- REG_ST_XON1,
- REG_ST_XON2,
- REG_ST_XOFF1,
- REG_ST_XOFF2,
- REG_ST_ITCR,
- REG_ST_ITIP,
- REG_ST_ABCR,
- REG_ST_ABIMSC,
-
- /* The size of the array - must be last */
- REG_ARRAY_SIZE,
-};
-
-#endif
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3a45e4fc7993..dd1c7e4bd1c9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -299,11 +299,9 @@ static int atmel_config_rs485(struct uart_port *port,
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
- port->rs485 = *rs485conf;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+ if (rs485conf->flags & SER_RS485_RX_DURING_TX)
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
else
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 6113b953ce25..8c582779cf22 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -19,6 +19,8 @@ struct gpio_desc;
#include "cpm_uart_cpm2.h"
#elif defined(CONFIG_CPM1)
#include "cpm_uart_cpm1.h"
+#elif defined(CONFIG_COMPILE_TEST)
+#include "cpm_uart_cpm2.h"
#endif
#define SERIAL_CPM_MAJOR 204
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d6d3db9c3b1f..db07d6a5d764 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1247,7 +1247,7 @@ static int cpm_uart_init_port(struct device_node *np,
}
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-#ifdef CONFIG_CONSOLE_POLL
+#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_CPM_CONSOLE)
if (!udbg_port)
#endif
udbg_putc = NULL;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index 6a1cd03bfe39..108af254e8f3 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -25,7 +25,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/fs_pd.h>
-#include <asm/prom.h>
#include <linux/serial_core.h>
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index e37a917b9dbb..af951e6a2ef4 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port,
case CS8:
default:
config |= UA_CONFIG_CHAR_LEN;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index be12fee94db5..0d6e62f6bb07 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -239,8 +239,6 @@
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
-static DEFINE_IDA(fsl_lpuart_ida);
-
enum lpuart_type {
VF610_LPUART,
LS1021A_LPUART,
@@ -276,7 +274,6 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
- bool id_allocated;
};
struct lpuart_soc_data {
@@ -1118,7 +1115,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
struct dma_chan *chan = sport->dma_rx_chan;
struct circ_buf *ring = &sport->rx_ring;
unsigned long flags;
- int count = 0, copied;
+ int count, copied;
if (lpuart_is_32(sport)) {
unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
@@ -1378,19 +1375,6 @@ static int lpuart_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transfer _or_ after
- * transfer, other variants are not supported by the hardware.
- */
-
- if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND)))
- rs485->flags |= SER_RS485_RTS_ON_SEND;
-
- if (rs485->flags & SER_RS485_RTS_ON_SEND &&
- rs485->flags & SER_RS485_RTS_AFTER_SEND)
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
- /*
* The hardware defaults to RTS logic HIGH while transfer.
* Switch polarity in case RTS shall be logic HIGH
* after transfer.
@@ -1402,9 +1386,6 @@ static int lpuart_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSPOL;
}
- /* Store the new configuration */
- sport->port.rs485 = *rs485;
-
writeb(modem, sport->port.membase + UARTMODEM);
return 0;
}
@@ -1429,19 +1410,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transfer _or_ after
- * transfer, other variants are not supported by the hardware.
- */
-
- if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND)))
- rs485->flags |= SER_RS485_RTS_ON_SEND;
-
- if (rs485->flags & SER_RS485_RTS_ON_SEND &&
- rs485->flags & SER_RS485_RTS_AFTER_SEND)
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
- /*
* The hardware defaults to RTS logic HIGH while transfer.
* Switch polarity in case RTS shall be logic HIGH
* after transfer.
@@ -1453,9 +1421,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSPOL;
}
- /* Store the new configuration */
- sport->port.rs485 = *rs485;
-
lpuart32_write(&sport->port, modem, UARTMODIR);
return 0;
}
@@ -2145,12 +2110,10 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
if (sport->port.rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
- if (termios->c_cflag & CRTSCTS) {
- modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
- } else {
- termios->c_cflag &= ~CRTSCTS;
+ if (termios->c_cflag & CRTSCTS)
+ modem |= UARTMODIR_RXRTSE | UARTMODIR_TXCTSE;
+ else
modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
- }
if (termios->c_cflag & CSTOPB)
bd |= UARTBAUD_SBNS;
@@ -2717,23 +2680,18 @@ static int lpuart_probe(struct platform_device *pdev)
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&pdev->dev, "port line is full, add device failed\n");
- return ret;
- }
- sport->id_allocated = true;
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
}
if (ret >= ARRAY_SIZE(lpuart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", ret);
- ret = -EINVAL;
- goto failed_out_of_range;
+ return -EINVAL;
}
sport->port.line = ret;
ret = lpuart_enable_clks(sport);
if (ret)
- goto failed_clock_enable;
+ return ret;
sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
lpuart_ports[sport->port.line] = sport;
@@ -2781,10 +2739,6 @@ failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
lpuart_disable_clks(sport);
-failed_clock_enable:
-failed_out_of_range:
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
return ret;
}
@@ -2794,9 +2748,6 @@ static int lpuart_remove(struct platform_device *pdev)
uart_remove_one_port(&lpuart_reg, &sport->port);
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
-
lpuart_disable_clks(sport);
if (sport->dma_tx_chan)
@@ -2926,7 +2877,6 @@ static int __init lpuart_serial_init(void)
static void __exit lpuart_serial_exit(void)
{
- ida_destroy(&fsl_lpuart_ida);
platform_driver_unregister(&lpuart_driver);
uart_unregister_driver(&lpuart_reg);
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 03a2fe9f4c9a..45df29947fe8 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/major.h>
#include <linux/string.h>
@@ -41,15 +42,273 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include "icom.h"
-
/*#define ICOM_TRACE enable port trace capabilities */
#define ICOM_DRIVER_NAME "icom"
-#define ICOM_VERSION_STR "1.3.1"
#define NR_PORTS 128
-#define ICOM_PORT ((struct icom_port *)port)
-#define to_icom_adapter(d) container_of(d, struct icom_adapter, kref)
+
+static const unsigned int icom_acfg_baud[] = {
+ 300,
+ 600,
+ 900,
+ 1200,
+ 1800,
+ 2400,
+ 3600,
+ 4800,
+ 7200,
+ 9600,
+ 14400,
+ 19200,
+ 28800,
+ 38400,
+ 57600,
+ 76800,
+ 115200,
+ 153600,
+ 230400,
+ 307200,
+ 460800,
+};
+#define BAUD_TABLE_LIMIT (ARRAY_SIZE(icom_acfg_baud) - 1)
+
+struct icom_regs {
+ u32 control; /* Adapter Control Register */
+ u32 interrupt; /* Adapter Interrupt Register */
+ u32 int_mask; /* Adapter Interrupt Mask Reg */
+ u32 int_pri; /* Adapter Interrupt Priority r */
+ u32 int_reg_b; /* Adapter non-masked Interrupt */
+ u32 resvd01;
+ u32 resvd02;
+ u32 resvd03;
+ u32 control_2; /* Adapter Control Register 2 */
+ u32 interrupt_2; /* Adapter Interrupt Register 2 */
+ u32 int_mask_2; /* Adapter Interrupt Mask 2 */
+ u32 int_pri_2; /* Adapter Interrupt Prior 2 */
+ u32 int_reg_2b; /* Adapter non-masked 2 */
+};
+
+struct func_dram {
+ u32 reserved[108]; /* 0-1B0 reserved by personality code */
+ u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
+ u8 RcvStnAddr; /* 1B4 Receive Station Addr */
+ u8 IdleState; /* 1B5 Idle State */
+ u8 IdleMonitor; /* 1B6 Idle Monitor */
+ u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
+ u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
+ u8 StartXmitCmd; /* 1BC Start Xmit Command */
+ u8 HDLCConfigReg; /* 1BD Reserved */
+ u8 CauseCode; /* 1BE Cause code for fatal error */
+ u8 xchar; /* 1BF High priority send */
+ u32 reserved3; /* 1C0-1C3 Reserved */
+ u8 PrevCmdReg; /* 1C4 Reserved */
+ u8 CmdReg; /* 1C5 Command Register */
+ u8 async_config2; /* 1C6 Async Config Byte 2 */
+ u8 async_config3; /* 1C7 Async Config Byte 3 */
+ u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
+ u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte) */
+ u8 misc_flags; /* 1DD misc flags */
+#define V2_HARDWARE 0x40
+#define ICOM_HDW_ACTIVE 0x01
+ u8 call_length; /* 1DE Phone #/CFI buff ln */
+ u8 call_length2; /* 1DF Upper byte (unused) */
+ u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
+ u16 timer_value; /* 1E4-1E5 general timer value */
+ u8 timer_command; /* 1E6 general timer cmd */
+ u8 dce_command; /* 1E7 dce command reg */
+ u8 dce_cmd_status; /* 1E8 dce command stat */
+ u8 x21_r1_ioff; /* 1E9 dce ready counter */
+ u8 x21_r0_ioff; /* 1EA dce not ready ctr */
+ u8 x21_ralt_ioff; /* 1EB dce CNR counter */
+ u8 x21_r1_ion; /* 1EC dce ready I on ctr */
+ u8 rsvd_ier; /* 1ED Rsvd for IER (if ne */
+ u8 ier; /* 1EE Interrupt Enable */
+ u8 isr; /* 1EF Input Signal Reg */
+ u8 osr; /* 1F0 Output Signal Reg */
+ u8 reset; /* 1F1 Reset/Reload Reg */
+ u8 disable; /* 1F2 Disable Reg */
+ u8 sync; /* 1F3 Sync Reg */
+ u8 error_stat; /* 1F4 Error Status */
+ u8 cable_id; /* 1F5 Cable ID */
+ u8 cs_length; /* 1F6 CS Load Length */
+ u8 mac_length; /* 1F7 Mac Load Length */
+ u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
+ u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
+};
+
+/*
+ * adapter defines and structures
+ */
+#define ICOM_CONTROL_START_A 0x00000008
+#define ICOM_CONTROL_STOP_A 0x00000004
+#define ICOM_CONTROL_START_B 0x00000002
+#define ICOM_CONTROL_STOP_B 0x00000001
+#define ICOM_CONTROL_START_C 0x00000008
+#define ICOM_CONTROL_STOP_C 0x00000004
+#define ICOM_CONTROL_START_D 0x00000002
+#define ICOM_CONTROL_STOP_D 0x00000001
+#define ICOM_IRAM_OFFSET 0x1000
+#define ICOM_IRAM_SIZE 0x0C00
+#define ICOM_DCE_IRAM_OFFSET 0x0A00
+#define ICOM_CABLE_ID_VALID 0x01
+#define ICOM_CABLE_ID_MASK 0xF0
+#define ICOM_DISABLE 0x80
+#define CMD_XMIT_RCV_ENABLE 0xC0
+#define CMD_XMIT_ENABLE 0x40
+#define CMD_RCV_DISABLE 0x00
+#define CMD_RCV_ENABLE 0x80
+#define CMD_RESTART 0x01
+#define CMD_HOLD_XMIT 0x02
+#define CMD_SND_BREAK 0x04
+#define RS232_CABLE 0x06
+#define V24_CABLE 0x0E
+#define V35_CABLE 0x0C
+#define V36_CABLE 0x02
+#define NO_CABLE 0x00
+#define START_DOWNLOAD 0x80
+#define ICOM_INT_MASK_PRC_A 0x00003FFF
+#define ICOM_INT_MASK_PRC_B 0x3FFF0000
+#define ICOM_INT_MASK_PRC_C 0x00003FFF
+#define ICOM_INT_MASK_PRC_D 0x3FFF0000
+#define INT_RCV_COMPLETED 0x1000
+#define INT_XMIT_COMPLETED 0x2000
+#define INT_IDLE_DETECT 0x0800
+#define INT_RCV_DISABLED 0x0400
+#define INT_XMIT_DISABLED 0x0200
+#define INT_RCV_XMIT_SHUTDOWN 0x0100
+#define INT_FATAL_ERROR 0x0080
+#define INT_CABLE_PULL 0x0020
+#define INT_SIGNAL_CHANGE 0x0010
+#define HDLC_PPP_PURE_ASYNC 0x02
+#define HDLC_FF_FILL 0x00
+#define HDLC_HDW_FLOW 0x01
+#define START_XMIT 0x80
+#define ICOM_ACFG_DRIVE1 0x20
+#define ICOM_ACFG_NO_PARITY 0x00
+#define ICOM_ACFG_PARITY_ENAB 0x02
+#define ICOM_ACFG_PARITY_ODD 0x01
+#define ICOM_ACFG_8BPC 0x00
+#define ICOM_ACFG_7BPC 0x04
+#define ICOM_ACFG_6BPC 0x08
+#define ICOM_ACFG_5BPC 0x0C
+#define ICOM_ACFG_1STOP_BIT 0x00
+#define ICOM_ACFG_2STOP_BIT 0x10
+#define ICOM_DTR 0x80
+#define ICOM_RTS 0x40
+#define ICOM_RI 0x08
+#define ICOM_DSR 0x80
+#define ICOM_DCD 0x20
+#define ICOM_CTS 0x40
+
+#define NUM_XBUFFS 1
+#define NUM_RBUFFS 2
+#define RCV_BUFF_SZ 0x0200
+#define XMIT_BUFF_SZ 0x1000
+struct statusArea {
+ /**********************************************/
+ /* Transmit Status Area */
+ /**********************************************/
+ struct xmit_status_area{
+ __le32 leNext; /* Next entry in Little Endian on Adapter */
+ __le32 leNextASD;
+ __le32 leBuffer; /* Buffer for entry in LE for Adapter */
+ __le16 leLengthASD;
+ __le16 leOffsetASD;
+ __le16 leLength; /* Length of data in segment */
+ __le16 flags;
+#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
+#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
+#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
+#define SA_FLAGS_READY_TO_XMIT 0x0800
+#define SA_FLAGS_STAT_MASK 0x007F
+ } xmit[NUM_XBUFFS];
+
+ /**********************************************/
+ /* Receive Status Area */
+ /**********************************************/
+ struct {
+ __le32 leNext; /* Next entry in Little Endian on Adapter */
+ __le32 leNextASD;
+ __le32 leBuffer; /* Buffer for entry in LE for Adapter */
+ __le16 WorkingLength; /* size of segment */
+ __le16 reserv01;
+ __le16 leLength; /* Length of data in segment */
+ __le16 flags;
+#define SA_FL_RCV_DONE 0x0010 /* Data ready */
+#define SA_FLAGS_OVERRUN 0x0040
+#define SA_FLAGS_PARITY_ERROR 0x0080
+#define SA_FLAGS_FRAME_ERROR 0x0001
+#define SA_FLAGS_FRAME_TRUNC 0x0002
+#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
+#define SA_FLAGS_RCV_MASK 0xFFE6
+ } rcv[NUM_RBUFFS];
+};
+
+struct icom_adapter;
+
+
+#define ICOM_MAJOR 243
+#define ICOM_MINOR_START 0
+
+struct icom_port {
+ struct uart_port uart_port;
+ unsigned char cable_id;
+ unsigned char read_status_mask;
+ unsigned char ignore_status_mask;
+ void __iomem * int_reg;
+ struct icom_regs __iomem *global_reg;
+ struct func_dram __iomem *dram;
+ int port;
+ struct statusArea *statStg;
+ dma_addr_t statStg_pci;
+ __le32 *xmitRestart;
+ dma_addr_t xmitRestart_pci;
+ unsigned char *xmit_buf;
+ dma_addr_t xmit_buf_pci;
+ unsigned char *recv_buf;
+ dma_addr_t recv_buf_pci;
+ int next_rcv;
+ int status;
+#define ICOM_PORT_ACTIVE 1 /* Port exists. */
+#define ICOM_PORT_OFF 0 /* Port does not exist. */
+ struct icom_adapter *adapter;
+};
+
+struct icom_adapter {
+ void __iomem * base_addr;
+ unsigned long base_addr_pci;
+ struct pci_dev *pci_dev;
+ struct icom_port port_info[4];
+ int index;
+ int version;
+#define ADAPTER_V1 0x0001
+#define ADAPTER_V2 0x0002
+ u32 subsystem_id;
+#define FOUR_PORT_MODEL 0x0252
+#define V2_TWO_PORTS_RVX 0x021A
+#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
+ int numb_ports;
+ struct list_head icom_adapter_entry;
+ struct kref kref;
+};
+
+/* prototype */
+extern void iCom_sercons_init(void);
+
+struct lookup_proc_table {
+ u32 __iomem *global_control_reg;
+ unsigned long processor_id;
+};
+
+struct lookup_int_table {
+ u32 __iomem *global_int_mask;
+ unsigned long processor_id;
+};
+
+static inline struct icom_port *to_icom_port(struct uart_port *port)
+{
+ return container_of(port, struct icom_port, uart_port);
+}
static const struct pci_device_id icom_pci_table[] = {
{
@@ -222,7 +481,7 @@ static int get_port_memory(struct icom_port *icom_port)
if (index < (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
- (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_ADDR", stgAddr);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
@@ -231,7 +490,7 @@ static int get_port_memory(struct icom_port *icom_port)
} else if (index == (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
- (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
icom_port->statStg->xmit[index].leBuffer =
@@ -249,7 +508,7 @@ static int get_port_memory(struct icom_port *icom_port)
stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
icom_port->statStg->rcv[index].leLength = 0;
icom_port->statStg->rcv[index].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ cpu_to_le16(RCV_BUFF_SZ);
if (index < (NUM_RBUFFS - 1) ) {
offset = stgAddr - (unsigned long) icom_port->statStg;
icom_port->statStg->rcv[index].leNext =
@@ -617,16 +876,17 @@ unlock:
static int icom_write(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned long data_count;
unsigned char cmdReg;
unsigned long offset;
int temp_tail = port->state->xmit.tail;
- trace(ICOM_PORT, "WRITE", 0);
+ trace(icom_port, "WRITE", 0);
- if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT) {
- trace(ICOM_PORT, "WRITE_FULL", 0);
+ trace(icom_port, "WRITE_FULL", 0);
return 0;
}
@@ -634,7 +894,7 @@ static int icom_write(struct uart_port *port)
while ((port->state->xmit.head != temp_tail) &&
(data_count <= XMIT_BUFF_SZ)) {
- ICOM_PORT->xmit_buf[data_count++] =
+ icom_port->xmit_buf[data_count++] =
port->state->xmit.buf[temp_tail];
temp_tail++;
@@ -642,22 +902,22 @@ static int icom_write(struct uart_port *port)
}
if (data_count) {
- ICOM_PORT->statStg->xmit[0].flags =
+ icom_port->statStg->xmit[0].flags =
cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
- ICOM_PORT->statStg->xmit[0].leLength =
+ icom_port->statStg->xmit[0].leLength =
cpu_to_le16(data_count);
offset =
- (unsigned long) &ICOM_PORT->statStg->xmit[0] -
- (unsigned long) ICOM_PORT->statStg;
- *ICOM_PORT->xmitRestart =
- cpu_to_le32(ICOM_PORT->statStg_pci + offset);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ (unsigned long) &icom_port->statStg->xmit[0] -
+ (unsigned long) icom_port->statStg;
+ *icom_port->xmitRestart =
+ cpu_to_le32(icom_port->statStg_pci + offset);
+ cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
- &ICOM_PORT->dram->CmdReg);
- writeb(START_XMIT, &ICOM_PORT->dram->StartXmitCmd);
- trace(ICOM_PORT, "WRITE_START", data_count);
+ &icom_port->dram->CmdReg);
+ writeb(START_XMIT, &icom_port->dram->StartXmitCmd);
+ trace(icom_port, "WRITE_START", data_count);
/* write flush */
- readb(&ICOM_PORT->dram->StartXmitCmd);
+ readb(&icom_port->dram->StartXmitCmd);
}
return data_count;
@@ -696,8 +956,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
- unsigned short int count;
- int i;
+ u16 count, i;
if (port_int_reg & (INT_XMIT_COMPLETED)) {
trace(icom_port, "XMIT_COMPLETE", 0);
@@ -706,8 +965,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
icom_port->statStg->xmit[0].flags &=
cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
- count = (unsigned short int)
- cpu_to_le16(icom_port->statStg->xmit[0].leLength);
+ count = le16_to_cpu(icom_port->statStg->xmit[0].leLength);
icom_port->uart_port.icount.tx += count;
for (i=0; i<count &&
@@ -729,7 +987,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
short int count, rcv_buff;
struct tty_port *port = &icom_port->uart_port.state->port;
- unsigned short int status;
+ u16 status;
struct uart_icount *icount;
unsigned long offset;
unsigned char flag;
@@ -737,19 +995,18 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
trace(icom_port, "RCV_COMPLETE", 0);
rcv_buff = icom_port->next_rcv;
- status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
while (status & SA_FL_RCV_DONE) {
int first = -1;
trace(icom_port, "FID_STATUS", status);
- count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
+ count = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].leLength);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
- offset =
- cpu_to_le32(icom_port->statStg->rcv[rcv_buff].leBuffer) -
+ offset = le32_to_cpu(icom_port->statStg->rcv[rcv_buff].leBuffer) -
icom_port->recv_buf_pci;
/* Block copy all but the last byte as this may have status */
@@ -819,13 +1076,13 @@ ignore_char:
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
icom_port->statStg->rcv[rcv_buff].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ cpu_to_le16(RCV_BUFF_SZ);
rcv_buff++;
if (rcv_buff == NUM_RBUFFS)
rcv_buff = 0;
- status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
@@ -925,11 +1182,12 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
*/
static unsigned int icom_tx_empty(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
int ret;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
@@ -941,38 +1199,40 @@ static unsigned int icom_tx_empty(struct uart_port *port)
static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char local_osr;
- trace(ICOM_PORT, "SET_MODEM", 0);
- local_osr = readb(&ICOM_PORT->dram->osr);
+ trace(icom_port, "SET_MODEM", 0);
+ local_osr = readb(&icom_port->dram->osr);
if (mctrl & TIOCM_RTS) {
- trace(ICOM_PORT, "RAISE_RTS", 0);
+ trace(icom_port, "RAISE_RTS", 0);
local_osr |= ICOM_RTS;
} else {
- trace(ICOM_PORT, "LOWER_RTS", 0);
+ trace(icom_port, "LOWER_RTS", 0);
local_osr &= ~ICOM_RTS;
}
if (mctrl & TIOCM_DTR) {
- trace(ICOM_PORT, "RAISE_DTR", 0);
+ trace(icom_port, "RAISE_DTR", 0);
local_osr |= ICOM_DTR;
} else {
- trace(ICOM_PORT, "LOWER_DTR", 0);
+ trace(icom_port, "LOWER_DTR", 0);
local_osr &= ~ICOM_DTR;
}
- writeb(local_osr, &ICOM_PORT->dram->osr);
+ writeb(local_osr, &icom_port->dram->osr);
}
static unsigned int icom_get_mctrl(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char status;
unsigned int result;
- trace(ICOM_PORT, "GET_MODEM", 0);
+ trace(icom_port, "GET_MODEM", 0);
- status = readb(&ICOM_PORT->dram->isr);
+ status = readb(&icom_port->dram->isr);
result = ((status & ICOM_DCD) ? TIOCM_CAR : 0)
| ((status & ICOM_RI) ? TIOCM_RNG : 0)
@@ -983,44 +1243,47 @@ static unsigned int icom_get_mctrl(struct uart_port *port)
static void icom_stop_tx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "STOP", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "STOP", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg | CMD_HOLD_XMIT, &icom_port->dram->CmdReg);
}
static void icom_start_tx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "START", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "START", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT)
writeb(cmdReg & ~CMD_HOLD_XMIT,
- &ICOM_PORT->dram->CmdReg);
+ &icom_port->dram->CmdReg);
icom_write(port);
}
static void icom_send_xchar(struct uart_port *port, char ch)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char xdata;
int index;
unsigned long flags;
- trace(ICOM_PORT, "SEND_XCHAR", ch);
+ trace(icom_port, "SEND_XCHAR", ch);
/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
spin_lock_irqsave(&port->lock, flags);
- xdata = readb(&ICOM_PORT->dram->xchar);
+ xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
- trace(ICOM_PORT, "QUICK_WRITE", 0);
- writeb(ch, &ICOM_PORT->dram->xchar);
+ trace(icom_port, "QUICK_WRITE", 0);
+ writeb(ch, &icom_port->dram->xchar);
/* flush write operation */
- xdata = readb(&ICOM_PORT->dram->xchar);
+ xdata = readb(&icom_port->dram->xchar);
spin_unlock_irqrestore(&port->lock, flags);
break;
}
@@ -1031,38 +1294,41 @@ static void icom_send_xchar(struct uart_port *port, char ch)
static void icom_stop_rx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
}
static void icom_break(struct uart_port *port, int break_state)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- trace(ICOM_PORT, "BREAK", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "BREAK", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
- writeb(cmdReg | CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg | CMD_SND_BREAK, &icom_port->dram->CmdReg);
} else {
- writeb(cmdReg & ~CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static int icom_open(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
int retval;
- kref_get(&ICOM_PORT->adapter->kref);
- retval = startup(ICOM_PORT);
+ kref_get(&icom_port->adapter->kref);
+ retval = startup(icom_port);
if (retval) {
- kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
- trace(ICOM_PORT, "STARTUP_ERROR", 0);
+ kref_put(&icom_port->adapter->kref, icom_kref_release);
+ trace(icom_port, "STARTUP_ERROR", 0);
return retval;
}
@@ -1071,23 +1337,25 @@ static int icom_open(struct uart_port *port)
static void icom_close(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "CLOSE", 0);
+ trace(icom_port, "CLOSE", 0);
/* stop receiver */
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
- shutdown(ICOM_PORT);
+ shutdown(icom_port);
- kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
+ kref_put(&icom_port->adapter->kref, icom_kref_release);
}
static void icom_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old_termios)
{
+ struct icom_port *icom_port = to_icom_port(port);
int baud;
unsigned cflag, iflag;
char new_config2;
@@ -1099,7 +1367,7 @@ static void icom_set_termios(struct uart_port *port,
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- trace(ICOM_PORT, "CHANGE_SPEED", 0);
+ trace(icom_port, "CHANGE_SPEED", 0);
cflag = termios->c_cflag;
iflag = termios->c_iflag;
@@ -1130,12 +1398,12 @@ static void icom_set_termios(struct uart_port *port,
if (cflag & PARENB) {
/* parity bit enabled */
new_config2 |= ICOM_ACFG_PARITY_ENAB;
- trace(ICOM_PORT, "PARENB", 0);
+ trace(icom_port, "PARENB", 0);
}
if (cflag & PARODD) {
/* odd parity */
new_config2 |= ICOM_ACFG_PARITY_ODD;
- trace(ICOM_PORT, "PARODD", 0);
+ trace(icom_port, "PARODD", 0);
}
/* Determine divisor based on baud rate */
@@ -1155,100 +1423,99 @@ static void icom_set_termios(struct uart_port *port,
uart_update_timeout(port, cflag, baud);
/* CTS flow control flag and modem status interrupts */
- tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
if (cflag & CRTSCTS)
tmp_byte |= HDLC_HDW_FLOW;
else
tmp_byte &= ~HDLC_HDW_FLOW;
- writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
+ writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
/*
* Set up parity check flag
*/
- ICOM_PORT->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
+ icom_port->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
if (iflag & INPCK)
- ICOM_PORT->read_status_mask |=
+ icom_port->read_status_mask |=
SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR;
if ((iflag & BRKINT) || (iflag & PARMRK))
- ICOM_PORT->read_status_mask |= SA_FLAGS_BREAK_DET;
+ icom_port->read_status_mask |= SA_FLAGS_BREAK_DET;
/*
* Characters to ignore
*/
- ICOM_PORT->ignore_status_mask = 0;
+ icom_port->ignore_status_mask = 0;
if (iflag & IGNPAR)
- ICOM_PORT->ignore_status_mask |=
+ icom_port->ignore_status_mask |=
SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR;
if (iflag & IGNBRK) {
- ICOM_PORT->ignore_status_mask |= SA_FLAGS_BREAK_DET;
+ icom_port->ignore_status_mask |= SA_FLAGS_BREAK_DET;
/*
* If we're ignore parity and break indicators, ignore
* overruns too. (For real raw support).
*/
if (iflag & IGNPAR)
- ICOM_PORT->ignore_status_mask |= SA_FLAGS_OVERRUN;
+ icom_port->ignore_status_mask |= SA_FLAGS_OVERRUN;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((cflag & CREAD) == 0)
- ICOM_PORT->ignore_status_mask |= SA_FL_RCV_DONE;
+ icom_port->ignore_status_mask |= SA_FL_RCV_DONE;
/* Turn off Receiver to prepare for reset */
- writeb(CMD_RCV_DISABLE, &ICOM_PORT->dram->CmdReg);
+ writeb(CMD_RCV_DISABLE, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
- if (readb(&ICOM_PORT->dram->PrevCmdReg) == 0x00) {
+ if (readb(&icom_port->dram->PrevCmdReg) == 0x00) {
break;
}
}
/* clear all current buffers of data */
for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) {
- ICOM_PORT->statStg->rcv[rcv_buff].flags = 0;
- ICOM_PORT->statStg->rcv[rcv_buff].leLength = 0;
- ICOM_PORT->statStg->rcv[rcv_buff].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ icom_port->statStg->rcv[rcv_buff].flags = 0;
+ icom_port->statStg->rcv[rcv_buff].leLength = 0;
+ icom_port->statStg->rcv[rcv_buff].WorkingLength =
+ cpu_to_le16(RCV_BUFF_SZ);
}
for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) {
- ICOM_PORT->statStg->xmit[xmit_buff].flags = 0;
+ icom_port->statStg->xmit[xmit_buff].flags = 0;
}
/* activate changes and start xmit and receiver here */
/* Enable the receiver */
- writeb(new_config3, &(ICOM_PORT->dram->async_config3));
- writeb(new_config2, &(ICOM_PORT->dram->async_config2));
- tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ writeb(new_config3, &(icom_port->dram->async_config3));
+ writeb(new_config2, &(icom_port->dram->async_config2));
+ tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL;
- writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
- writeb(0x04, &(ICOM_PORT->dram->FlagFillIdleTimer)); /* 0.5 seconds */
- writeb(0xFF, &(ICOM_PORT->dram->ier)); /* enable modem signal interrupts */
+ writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
+ writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */
+ writeb(0xFF, &(icom_port->dram->ier)); /* enable modem signal interrupts */
/* reset processor */
- writeb(CMD_RESTART, &ICOM_PORT->dram->CmdReg);
+ writeb(CMD_RESTART, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
- if (readb(&ICOM_PORT->dram->CmdReg) == 0x00) {
+ if (readb(&icom_port->dram->CmdReg) == 0x00) {
break;
}
}
/* Enable Transmitter and Receiver */
offset =
- (unsigned long) &ICOM_PORT->statStg->rcv[0] -
- (unsigned long) ICOM_PORT->statStg;
- writel(ICOM_PORT->statStg_pci + offset,
- &ICOM_PORT->dram->RcvStatusAddr);
- ICOM_PORT->next_rcv = 0;
- ICOM_PORT->put_length = 0;
- *ICOM_PORT->xmitRestart = 0;
- writel(ICOM_PORT->xmitRestart_pci,
- &ICOM_PORT->dram->XmitStatusAddr);
- trace(ICOM_PORT, "XR_ENAB", 0);
- writeb(CMD_XMIT_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ (unsigned long) &icom_port->statStg->rcv[0] -
+ (unsigned long) icom_port->statStg;
+ writel(icom_port->statStg_pci + offset,
+ &icom_port->dram->RcvStatusAddr);
+ icom_port->next_rcv = 0;
+ *icom_port->xmitRestart = 0;
+ writel(icom_port->xmitRestart_pci,
+ &icom_port->dram->XmitStatusAddr);
+ trace(icom_port, "XR_ENAB", 0);
+ writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1258,15 +1525,6 @@ static const char *icom_type(struct uart_port *port)
return "icom";
}
-static void icom_release_port(struct uart_port *port)
-{
-}
-
-static int icom_request_port(struct uart_port *port)
-{
- return 0;
-}
-
static void icom_config_port(struct uart_port *port, int flags)
{
port->type = PORT_ICOM;
@@ -1285,8 +1543,6 @@ static const struct uart_ops icom_ops = {
.shutdown = icom_close,
.set_termios = icom_set_termios,
.type = icom_type,
- .release_port = icom_release_port,
- .request_port = icom_request_port,
.config_port = icom_config_port,
};
@@ -1315,7 +1571,6 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
icom_port = &icom_adapter->port_info[i];
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
- icom_port->imbed_modem = ICOM_UNKNOWN;
}
} else {
if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) {
@@ -1326,26 +1581,15 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
- icom_port->imbed_modem = ICOM_IMBED_MODEM;
}
} else {
icom_adapter->numb_ports = 4;
icom_adapter->port_info[0].port = 0;
icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE;
-
- if (subsystem_id ==
- PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM) {
- icom_adapter->port_info[0].imbed_modem = ICOM_IMBED_MODEM;
- } else {
- icom_adapter->port_info[0].imbed_modem = ICOM_RVX;
- }
-
icom_adapter->port_info[1].status = ICOM_PORT_OFF;
-
icom_adapter->port_info[2].port = 2;
icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE;
- icom_adapter->port_info[2].imbed_modem = ICOM_RVX;
icom_adapter->port_info[3].status = ICOM_PORT_OFF;
}
}
@@ -1401,7 +1645,6 @@ static int icom_alloc_adapter(struct icom_adapter
int adapter_count = 0;
struct icom_adapter *icom_adapter;
struct icom_adapter *cur_adapter_entry;
- struct list_head *tmp;
icom_adapter = kzalloc(sizeof(struct icom_adapter), GFP_KERNEL);
@@ -1409,10 +1652,8 @@ static int icom_alloc_adapter(struct icom_adapter
return -ENOMEM;
}
- list_for_each(tmp, &icom_adapter_head) {
- cur_adapter_entry =
- list_entry(tmp, struct icom_adapter,
- icom_adapter_entry);
+ list_for_each_entry(cur_adapter_entry, &icom_adapter_head,
+ icom_adapter_entry) {
if (cur_adapter_entry->index != adapter_count) {
break;
}
@@ -1420,7 +1661,8 @@ static int icom_alloc_adapter(struct icom_adapter
}
icom_adapter->index = adapter_count;
- list_add_tail(&icom_adapter->icom_adapter_entry, tmp);
+ list_add_tail(&icom_adapter->icom_adapter_entry,
+ &cur_adapter_entry->icom_adapter_entry);
*icom_adapter_ref = icom_adapter;
return 0;
@@ -1432,8 +1674,10 @@ static void icom_free_adapter(struct icom_adapter *icom_adapter)
kfree(icom_adapter);
}
-static void icom_remove_adapter(struct icom_adapter *icom_adapter)
+static void icom_kref_release(struct kref *kref)
{
+ struct icom_adapter *icom_adapter = container_of(kref,
+ struct icom_adapter, kref);
struct icom_port *icom_port;
int index;
@@ -1466,14 +1710,6 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
icom_free_adapter(icom_adapter);
}
-static void icom_kref_release(struct kref *kref)
-{
- struct icom_adapter *icom_adapter;
-
- icom_adapter = to_icom_adapter(kref);
- icom_remove_adapter(icom_adapter);
-}
-
static int icom_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
@@ -1501,7 +1737,7 @@ static int icom_probe(struct pci_dev *dev,
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
- return retval;
+ goto probe_exit0;
}
pci_write_config_dword(dev, PCI_COMMAND,
@@ -1589,11 +1825,9 @@ probe_exit0:
static void icom_remove(struct pci_dev *dev)
{
struct icom_adapter *icom_adapter;
- struct list_head *tmp;
- list_for_each(tmp, &icom_adapter_head) {
- icom_adapter = list_entry(tmp, struct icom_adapter,
- icom_adapter_entry);
+ list_for_each_entry(icom_adapter, &icom_adapter_head,
+ icom_adapter_entry) {
if (icom_adapter->pci_dev == dev) {
kref_put(&icom_adapter->kref, icom_kref_release);
return;
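Several of the hunks above correct endianness handling: the status-area fields are little-endian on the adapter, so reads must go through le16_to_cpu()/le32_to_cpu() while writes use cpu_to_le16()/cpu_to_le32(); the old cpu_to_le16() on reads only happened to work on little-endian hosts. A minimal sketch of the intended conversion direction (hypothetical helpers, not part of the driver):

static bool icom_xmit_slot_busy(const struct statusArea *stat)
{
	/* adapter -> CPU: convert before testing flag bits */
	return le16_to_cpu(stat->xmit[0].flags) & SA_FLAGS_READY_TO_XMIT;
}

static void icom_xmit_slot_submit(struct statusArea *stat, u16 len)
{
	/* CPU -> adapter: convert before handing values to the hardware */
	stat->xmit[0].leLength = cpu_to_le16(len);
	stat->xmit[0].flags = cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
}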
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
deleted file mode 100644
index 26e3aa7b01e2..000000000000
--- a/drivers/tty/serial/icom.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * icom.h
- *
- * Copyright (C) 2001 Michael Anderson, IBM Corporation
- *
- * Serial device driver include file.
- */
-
-#include <linux/serial_core.h>
-
-#define BAUD_TABLE_LIMIT ((sizeof(icom_acfg_baud)/sizeof(int)) - 1)
-static int icom_acfg_baud[] = {
- 300,
- 600,
- 900,
- 1200,
- 1800,
- 2400,
- 3600,
- 4800,
- 7200,
- 9600,
- 14400,
- 19200,
- 28800,
- 38400,
- 57600,
- 76800,
- 115200,
- 153600,
- 230400,
- 307200,
- 460800,
-};
-
-struct icom_regs {
- u32 control; /* Adapter Control Register */
- u32 interrupt; /* Adapter Interrupt Register */
- u32 int_mask; /* Adapter Interrupt Mask Reg */
- u32 int_pri; /* Adapter Interrupt Priority r */
- u32 int_reg_b; /* Adapter non-masked Interrupt */
- u32 resvd01;
- u32 resvd02;
- u32 resvd03;
- u32 control_2; /* Adapter Control Register 2 */
- u32 interrupt_2; /* Adapter Interrupt Register 2 */
- u32 int_mask_2; /* Adapter Interrupt Mask 2 */
- u32 int_pri_2; /* Adapter Interrupt Prior 2 */
- u32 int_reg_2b; /* Adapter non-masked 2 */
-};
-
-struct func_dram {
- u32 reserved[108]; /* 0-1B0 reserved by personality code */
- u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
- u8 RcvStnAddr; /* 1B4 Receive Station Addr */
- u8 IdleState; /* 1B5 Idle State */
- u8 IdleMonitor; /* 1B6 Idle Monitor */
- u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
- u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
- u8 StartXmitCmd; /* 1BC Start Xmit Command */
- u8 HDLCConfigReg; /* 1BD Reserved */
- u8 CauseCode; /* 1BE Cause code for fatal error */
- u8 xchar; /* 1BF High priority send */
- u32 reserved3; /* 1C0-1C3 Reserved */
- u8 PrevCmdReg; /* 1C4 Reserved */
- u8 CmdReg; /* 1C5 Command Register */
- u8 async_config2; /* 1C6 Async Config Byte 2 */
- u8 async_config3; /* 1C7 Async Config Byte 3 */
- u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
- u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte */
- u8 misc_flags; /* 1DD misc flags */
-#define V2_HARDWARE 0x40
-#define ICOM_HDW_ACTIVE 0x01
- u8 call_length; /* 1DE Phone #/CFI buff ln */
- u8 call_length2; /* 1DF Upper byte (unused) */
- u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
- u16 timer_value; /* 1E4-1E5 general timer value */
- u8 timer_command; /* 1E6 general timer cmd */
- u8 dce_command; /* 1E7 dce command reg */
- u8 dce_cmd_status; /* 1E8 dce command stat */
- u8 x21_r1_ioff; /* 1E9 dce ready counter */
- u8 x21_r0_ioff; /* 1EA dce not ready ctr */
- u8 x21_ralt_ioff; /* 1EB dce CNR counter */
- u8 x21_r1_ion; /* 1EC dce ready I on ctr */
- u8 rsvd_ier; /* 1ED Rsvd for IER (if ne */
- u8 ier; /* 1EE Interrupt Enable */
- u8 isr; /* 1EF Input Signal Reg */
- u8 osr; /* 1F0 Output Signal Reg */
- u8 reset; /* 1F1 Reset/Reload Reg */
- u8 disable; /* 1F2 Disable Reg */
- u8 sync; /* 1F3 Sync Reg */
- u8 error_stat; /* 1F4 Error Status */
- u8 cable_id; /* 1F5 Cable ID */
- u8 cs_length; /* 1F6 CS Load Length */
- u8 mac_length; /* 1F7 Mac Load Length */
- u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
- u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
-};
-
-/*
- * adapter defines and structures
- */
-#define ICOM_CONTROL_START_A 0x00000008
-#define ICOM_CONTROL_STOP_A 0x00000004
-#define ICOM_CONTROL_START_B 0x00000002
-#define ICOM_CONTROL_STOP_B 0x00000001
-#define ICOM_CONTROL_START_C 0x00000008
-#define ICOM_CONTROL_STOP_C 0x00000004
-#define ICOM_CONTROL_START_D 0x00000002
-#define ICOM_CONTROL_STOP_D 0x00000001
-#define ICOM_IRAM_OFFSET 0x1000
-#define ICOM_IRAM_SIZE 0x0C00
-#define ICOM_DCE_IRAM_OFFSET 0x0A00
-#define ICOM_CABLE_ID_VALID 0x01
-#define ICOM_CABLE_ID_MASK 0xF0
-#define ICOM_DISABLE 0x80
-#define CMD_XMIT_RCV_ENABLE 0xC0
-#define CMD_XMIT_ENABLE 0x40
-#define CMD_RCV_DISABLE 0x00
-#define CMD_RCV_ENABLE 0x80
-#define CMD_RESTART 0x01
-#define CMD_HOLD_XMIT 0x02
-#define CMD_SND_BREAK 0x04
-#define RS232_CABLE 0x06
-#define V24_CABLE 0x0E
-#define V35_CABLE 0x0C
-#define V36_CABLE 0x02
-#define NO_CABLE 0x00
-#define START_DOWNLOAD 0x80
-#define ICOM_INT_MASK_PRC_A 0x00003FFF
-#define ICOM_INT_MASK_PRC_B 0x3FFF0000
-#define ICOM_INT_MASK_PRC_C 0x00003FFF
-#define ICOM_INT_MASK_PRC_D 0x3FFF0000
-#define INT_RCV_COMPLETED 0x1000
-#define INT_XMIT_COMPLETED 0x2000
-#define INT_IDLE_DETECT 0x0800
-#define INT_RCV_DISABLED 0x0400
-#define INT_XMIT_DISABLED 0x0200
-#define INT_RCV_XMIT_SHUTDOWN 0x0100
-#define INT_FATAL_ERROR 0x0080
-#define INT_CABLE_PULL 0x0020
-#define INT_SIGNAL_CHANGE 0x0010
-#define HDLC_PPP_PURE_ASYNC 0x02
-#define HDLC_FF_FILL 0x00
-#define HDLC_HDW_FLOW 0x01
-#define START_XMIT 0x80
-#define ICOM_ACFG_DRIVE1 0x20
-#define ICOM_ACFG_NO_PARITY 0x00
-#define ICOM_ACFG_PARITY_ENAB 0x02
-#define ICOM_ACFG_PARITY_ODD 0x01
-#define ICOM_ACFG_8BPC 0x00
-#define ICOM_ACFG_7BPC 0x04
-#define ICOM_ACFG_6BPC 0x08
-#define ICOM_ACFG_5BPC 0x0C
-#define ICOM_ACFG_1STOP_BIT 0x00
-#define ICOM_ACFG_2STOP_BIT 0x10
-#define ICOM_DTR 0x80
-#define ICOM_RTS 0x40
-#define ICOM_RI 0x08
-#define ICOM_DSR 0x80
-#define ICOM_DCD 0x20
-#define ICOM_CTS 0x40
-
-#define NUM_XBUFFS 1
-#define NUM_RBUFFS 2
-#define RCV_BUFF_SZ 0x0200
-#define XMIT_BUFF_SZ 0x1000
-struct statusArea {
- /**********************************************/
- /* Transmit Status Area */
- /**********************************************/
- struct xmit_status_area{
- u32 leNext; /* Next entry in Little Endian on Adapter */
- u32 leNextASD;
- u32 leBuffer; /* Buffer for entry in LE for Adapter */
- u16 leLengthASD;
- u16 leOffsetASD;
- u16 leLength; /* Length of data in segment */
- u16 flags;
-#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
-#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
-#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
-#define SA_FLAGS_READY_TO_XMIT 0x0800
-#define SA_FLAGS_STAT_MASK 0x007F
- } xmit[NUM_XBUFFS];
-
- /**********************************************/
- /* Receive Status Area */
- /**********************************************/
- struct {
- u32 leNext; /* Next entry in Little Endian on Adapter */
- u32 leNextASD;
- u32 leBuffer; /* Buffer for entry in LE for Adapter */
- u16 WorkingLength; /* size of segment */
- u16 reserv01;
- u16 leLength; /* Length of data in segment */
- u16 flags;
-#define SA_FL_RCV_DONE 0x0010 /* Data ready */
-#define SA_FLAGS_OVERRUN 0x0040
-#define SA_FLAGS_PARITY_ERROR 0x0080
-#define SA_FLAGS_FRAME_ERROR 0x0001
-#define SA_FLAGS_FRAME_TRUNC 0x0002
-#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
-#define SA_FLAGS_RCV_MASK 0xFFE6
- } rcv[NUM_RBUFFS];
-};
-
-struct icom_adapter;
-
-
-#define ICOM_MAJOR 243
-#define ICOM_MINOR_START 0
-
-struct icom_port {
- struct uart_port uart_port;
- u8 imbed_modem;
-#define ICOM_UNKNOWN 1
-#define ICOM_RVX 2
-#define ICOM_IMBED_MODEM 3
- unsigned char cable_id;
- unsigned char read_status_mask;
- unsigned char ignore_status_mask;
- void __iomem * int_reg;
- struct icom_regs __iomem *global_reg;
- struct func_dram __iomem *dram;
- int port;
- struct statusArea *statStg;
- dma_addr_t statStg_pci;
- u32 *xmitRestart;
- dma_addr_t xmitRestart_pci;
- unsigned char *xmit_buf;
- dma_addr_t xmit_buf_pci;
- unsigned char *recv_buf;
- dma_addr_t recv_buf_pci;
- int next_rcv;
- int put_length;
- int status;
-#define ICOM_PORT_ACTIVE 1 /* Port exists. */
-#define ICOM_PORT_OFF 0 /* Port does not exist. */
- int load_in_progress;
- struct icom_adapter *adapter;
-};
-
-struct icom_adapter {
- void __iomem * base_addr;
- unsigned long base_addr_pci;
- struct pci_dev *pci_dev;
- struct icom_port port_info[4];
- int index;
- int version;
-#define ADAPTER_V1 0x0001
-#define ADAPTER_V2 0x0002
- u32 subsystem_id;
-#define FOUR_PORT_MODEL 0x0252
-#define V2_TWO_PORTS_RVX 0x021A
-#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
- int numb_ports;
- struct list_head icom_adapter_entry;
- struct kref kref;
-};
-
-/* prototype */
-extern void iCom_sercons_init(void);
-
-struct lookup_proc_table {
- u32 __iomem *global_control_reg;
- unsigned long processor_id;
-};
-
-struct lookup_int_table {
- u32 __iomem *global_int_mask;
- unsigned long processor_id;
-};
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b1639b174292..30edb35a6a15 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1937,8 +1937,6 @@ static int imx_uart_rs485_config(struct uart_port *port,
rs485conf->flags & SER_RS485_RX_DURING_TX)
imx_uart_start_rx(port);
- port->rs485 = *rs485conf;
-
return 0;
}
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 444f233ebd1f..3fd57ac3ad81 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -689,7 +689,7 @@ static void cls_param(struct jsm_channel *ch)
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
*/
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ if ((ch->ch_c_cflag & CBAUD) == B0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
ch->ch_e_head = 0;
@@ -723,14 +723,8 @@ static void cls_param(struct jsm_channel *ch)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
if (ch->ch_c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 110696cdaa1d..0c78f66276cd 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -938,7 +938,7 @@ static void neo_param(struct jsm_channel *ch)
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
*/
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ if ((ch->ch_c_cflag & CBAUD) == B0) {
ch->ch_r_head = ch->ch_r_tail = 0;
ch->ch_e_head = ch->ch_e_tail = 0;
@@ -997,14 +997,8 @@ static void neo_param(struct jsm_channel *ch)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
if (ch->ch_c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3112b4a05448..a0b6ea52d133 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1037,7 +1037,6 @@ static int max310x_rs485_config(struct uart_port *port,
rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
SER_RS485_ENABLED;
- memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
schedule_work(&one->rs_work);
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 9acae5f8fc32..12117b596e73 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -833,7 +833,6 @@ static int men_z135_probe(struct mcb_device *mdev,
uart->port.iotype = UPIO_MEM;
uart->port.ops = &men_z135_ops;
uart->port.irq = mcb_get_irq(mdev);
- uart->port.iotype = UPIO_MEM;
uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
uart->port.line = line++;
uart->port.dev = dev;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 2bf1c57e0981..4869c0059c98 100644

--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -68,6 +68,7 @@
#define AML_UART_BAUD_MASK 0x7fffff
#define AML_UART_BAUD_USE BIT(23)
#define AML_UART_BAUD_XTAL BIT(24)
+#define AML_UART_BAUD_XTAL_DIV2 BIT(27)
#define AML_UART_PORT_NUM 12
#define AML_UART_PORT_OFFSET 6
@@ -80,6 +81,10 @@ static struct uart_driver meson_uart_driver;
static struct uart_port *meson_ports[AML_UART_PORT_NUM];
+struct meson_uart_data {
+ bool has_xtal_div2;
+};
+
static void meson_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
@@ -253,6 +258,14 @@ static const char *meson_uart_type(struct uart_port *port)
return (port->type == PORT_MESON) ? "meson_uart" : NULL;
}
+/*
+ * This function is called only from probe(), through a temporary io
+ * mapping, in order to reset the device before it is set up. Because
+ * that temporary mapping was successfully requested, no console can be
+ * attached to this port yet, so there is no need to take port->lock
+ * here; indeed, the lock has not even been initialized at this point.
+ */
static void meson_uart_reset(struct uart_port *port)
{
u32 val;
@@ -267,9 +280,12 @@ static void meson_uart_reset(struct uart_port *port)
static int meson_uart_startup(struct uart_port *port)
{
+ unsigned long flags;
u32 val;
int ret = 0;
+ spin_lock_irqsave(&port->lock, flags);
+
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
@@ -285,6 +301,8 @@ static int meson_uart_startup(struct uart_port *port)
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
+ spin_unlock_irqrestore(&port->lock, flags);
+
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
@@ -293,16 +311,23 @@ static int meson_uart_startup(struct uart_port *port)
static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
{
- u32 val;
+ const struct meson_uart_data *private_data = port->private_data;
+ u32 val = 0;
while (!meson_uart_tx_empty(port))
cpu_relax();
if (port->uartclk == 24000000) {
- val = ((port->uartclk / 3) / baud) - 1;
+ unsigned int xtal_div = 3;
+
+ if (private_data && private_data->has_xtal_div2) {
+ xtal_div = 2;
+ val |= AML_UART_BAUD_XTAL_DIV2;
+ }
+ val |= DIV_ROUND_CLOSEST(port->uartclk / xtal_div, baud) - 1;
val |= AML_UART_BAUD_XTAL;
} else {
- val = ((port->uartclk * 10 / (baud * 4) + 5) / 10) - 1;
+ val = DIV_ROUND_CLOSEST(port->uartclk / 4, baud) - 1;
}
val |= AML_UART_BAUD_USE;
writel(val, port->membase + AML_UART_REG5);
@@ -749,6 +774,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->x_char = 0;
port->ops = &meson_uart_ops;
port->fifosize = fifosize;
+ port->private_data = (void *)device_get_match_data(&pdev->dev);
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
@@ -777,11 +803,19 @@ static int meson_uart_remove(struct platform_device *pdev)
return 0;
}
+static struct meson_uart_data s4_uart_data = {
+ .has_xtal_div2 = true,
+};
+
static const struct of_device_id meson_uart_dt_match[] = {
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
{ .compatible = "amlogic,meson8b-uart" },
{ .compatible = "amlogic,meson-gx-uart" },
+ {
+ .compatible = "amlogic,meson-s4-uart",
+ .data = (void *)&s4_uart_data,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
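
The new meson_uart_change_speed() arithmetic can be checked in isolation. A small user-space sketch of the XTAL-path divisor computation, assuming the usual 24 MHz crystal and 115200 baud; the DIV_ROUND_CLOSEST macro below mirrors the kernel's for unsigned operands, and register bit assembly such as AML_UART_BAUD_XTAL is omitted:

#include <stdio.h>

/* same rounding the kernel's DIV_ROUND_CLOSEST() performs for
 * unsigned operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long uartclk = 24000000;	/* 24 MHz crystal */
	unsigned long baud = 115200;

	/* legacy SoCs: crystal divided by 3 */
	printf("xtal/3 divisor: %lu\n",
	       DIV_ROUND_CLOSEST(uartclk / 3, baud) - 1);

	/* S4: AML_UART_BAUD_XTAL_DIV2 is set, crystal divided by 2 */
	printf("xtal/2 divisor: %lu\n",
	       DIV_ROUND_CLOSEST(uartclk / 2, baud) - 1);

	return 0;
}

This prints 68 and 103 respectively, the values that end up in the AML_UART_REG5 divisor field.
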
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3acc0f185762..e50f069b5ebb 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -38,6 +38,8 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
@@ -754,9 +756,6 @@ static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
port->irqflags = IRQF_SHARED;
port->irq = psc_fifoc_irq;
}
-#endif
-
-#ifdef CONFIG_PPC_MPC512x
#define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase))
#define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1))
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 23c94b927776..e676ec761f18 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1599,6 +1599,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line)
static void __msm_console_write(struct uart_port *port, const char *s,
unsigned int count, bool is_uartdm)
{
+ unsigned long flags;
int i;
int num_newlines = 0;
bool replaced = false;
@@ -1616,6 +1617,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
+ local_irq_save(flags);
+
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
@@ -1661,6 +1664,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (locked)
spin_unlock(&port->lock);
+
+ local_irq_restore(flags);
}
static void msm_console_write(struct console *co, const char *s,
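
The msm hunks wrap the existing trylock-under-oops logic with local_irq_save()/local_irq_restore() so a console write can no longer be interrupted on the same CPU between taking and releasing the port lock. For reference, a condensed sketch of that standard console-write locking pattern; example_console_write() and example_putchar() are hypothetical names, and the fragment assumes a kernel build context (<linux/serial_core.h>, <linux/console.h>):

#include <linux/console.h>
#include <linux/serial_core.h>

/* Hypothetical hardware putchar: a real driver would busy-wait for
 * FIFO space and write the character to its TX register here. */
static void example_putchar(struct uart_port *port, unsigned char ch)
{
}

static void example_console_write(struct uart_port *port, const char *s,
				  unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	/* keep the whole transfer atomic w.r.t. interrupts on this CPU */
	local_irq_save(flags);

	/* take the port lock unless sysrq/oops handling makes it unsafe */
	if (port->sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);
	else
		spin_lock(&port->lock);

	/* uart_console_write() expands '\n' to '\r\n' and emits the rest */
	uart_console_write(port, s, count, example_putchar);

	if (locked)
		spin_unlock(&port->lock);

	local_irq_restore(flags);
}
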
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 8d5ffa196097..46f4d4cacb6e 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1336,18 +1336,11 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
up->ier = 0;
serial_out(up, UART_IER, 0);
- /* Clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- /* store new config */
- port->rs485 = *rs485;
-
if (up->rts_gpiod) {
/* enable / disable rts */
- val = (port->rs485.flags & SER_RS485_ENABLED) ?
+ val = (rs485->flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
- val = (port->rs485.flags & val) ? 1 : 0;
+ val = (rs485->flags & val) ? 1 : 0;
gpiod_set_value(up->rts_gpiod, val);
}
@@ -1358,7 +1351,7 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
/* If RS-485 is disabled, make sure the THR interrupt is fired when
* TX FIFO is below the trigger level.
*/
- if (!(port->rs485.flags & SER_RS485_ENABLED) &&
+ if (!(rs485->flags & SER_RS485_ENABLED) &&
(up->scr & OMAP_UART_SCR_TX_EMPTY)) {
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 5250bd7d390a..44d20e5a7dd3 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -184,9 +184,6 @@ static void owl_uart_send_chars(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
unsigned int ch;
- if (uart_tx_stopped(port))
- return;
-
if (port->x_char) {
while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU))
cpu_relax();
@@ -195,6 +192,9 @@ static void owl_uart_send_chars(struct uart_port *port)
port->x_char = 0;
}
+ if (uart_tx_stopped(port))
+ return;
+
while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) {
if (uart_circ_empty(xmit))
break;
@@ -731,6 +731,7 @@ static int owl_uart_probe(struct platform_device *pdev)
owl_port->port.uartclk = clk_get_rate(owl_port->clk);
if (owl_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
+ clk_disable_unprepare(owl_port->clk);
return -EINVAL;
}
owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index affe71f8b50c..3b26524d48e3 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -550,18 +550,6 @@ static u8 pch_uart_hal_get_modem(struct eg20t_port *priv)
return (u8)msr;
}
-static void pch_uart_hal_write(struct eg20t_port *priv,
- const unsigned char *buf, int tx_size)
-{
- int i;
- unsigned int thr;
-
- for (i = 0; i < tx_size;) {
- thr = buf[i++];
- iowrite8(thr, priv->membase + PCH_UART_THR);
- }
-}
-
static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
int rx_size)
{
@@ -624,22 +612,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
return 0;
}
-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
-{
- int ret = 0;
- struct uart_port *port = &priv->port;
-
- if (port->x_char) {
- dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
- __func__, port->x_char, jiffies);
- buf[0] = port->x_char;
- port->x_char = 0;
- ret = 1;
- }
-
- return ret;
-}
-
static int dma_push_rx(struct eg20t_port *priv, int size)
{
int room;
@@ -785,31 +757,6 @@ static void pch_dma_tx_complete(void *arg)
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
-static int pop_tx(struct eg20t_port *priv, int size)
-{
- int count = 0;
- struct uart_port *port = &priv->port;
- struct circ_buf *xmit = &port->state->xmit;
-
- if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
- goto pop_tx_end;
-
- do {
- int cnt_to_end =
- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- int sz = min(size - count, cnt_to_end);
- pch_uart_hal_write(priv, &xmit->buf[xmit->tail], sz);
- xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
- count += sz;
- } while (!uart_circ_empty(xmit) && count < size);
-
-pop_tx_end:
- dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n",
- count, size - count, jiffies);
-
- return count;
-}
-
static int handle_rx_to(struct eg20t_port *priv)
{
struct pch_uart_buffer *buf;
@@ -875,8 +822,6 @@ static unsigned int handle_tx(struct eg20t_port *priv)
struct uart_port *port = &priv->port;
struct circ_buf *xmit = &port->state->xmit;
int fifo_size;
- int tx_size;
- int size;
int tx_empty;
if (!priv->start_tx) {
@@ -889,19 +834,19 @@ static unsigned int handle_tx(struct eg20t_port *priv)
fifo_size = max(priv->fifo_size, 1);
tx_empty = 1;
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+ if (port->x_char) {
+ iowrite8(port->x_char, priv->membase + PCH_UART_THR);
port->icount.tx++;
+ port->x_char = 0;
tx_empty = 0;
fifo_size--;
}
- size = min(xmit->head - xmit->tail, fifo_size);
- if (size < 0)
- size = fifo_size;
- tx_size = pop_tx(priv, size);
- if (tx_size > 0) {
- port->icount.tx += tx_size;
+ while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) {
+ iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ fifo_size--;
tx_empty = 0;
}
@@ -946,9 +891,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
fifo_size = max(priv->fifo_size, 1);
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+
+ if (port->x_char) {
+ iowrite8(port->x_char, priv->membase + PCH_UART_THR);
port->icount.tx++;
+ port->x_char = 0;
fifo_size--;
}
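
The pch_uart rework above removes the pop_tx()/pop_tx_x() helpers and drains the circular transmit buffer byte by byte, relying on UART_XMIT_SIZE being a power of two so the tail index can be advanced with a mask. A self-contained user-space demo of that masking idiom; XMIT_SIZE, struct circ and the 3-slot FIFO are illustrative stand-ins (the kernel's UART_XMIT_SIZE is PAGE_SIZE, typically 4096):

#include <stdio.h>
#include <string.h>

#define XMIT_SIZE 8		/* must be a power of two, like UART_XMIT_SIZE */

struct circ {
	char buf[XMIT_SIZE];
	unsigned int head;	/* producer index */
	unsigned int tail;	/* consumer index */
};

/* number of bytes queued, same formula as the kernel's CIRC_CNT() */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	struct circ c = { .head = 0, .tail = 0 };
	const char *msg = "hello";
	unsigned int fifo_free = 3;	/* pretend the hw FIFO has 3 free slots */

	/* producer: queue the message */
	for (size_t i = 0; i < strlen(msg); i++) {
		c.buf[c.head] = msg[i];
		c.head = (c.head + 1) & (XMIT_SIZE - 1);
	}

	/* consumer: drain at most fifo_free bytes, one per slot, just
	 * like the rewritten handle_tx() loop */
	while (CIRC_CNT(c.head, c.tail, XMIT_SIZE) && fifo_free) {
		putchar(c.buf[c.tail]);
		c.tail = (c.tail + 1) & (XMIT_SIZE - 1);
		fifo_free--;
	}
	putchar('\n');
	printf("still queued: %u\n", CIRC_CNT(c.head, c.tail, XMIT_SIZE));
	return 0;
}
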
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b7a3a1b959b1..b399aac530fe 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -25,38 +25,114 @@
#include <linux/delay.h>
#include <asm/mach-pic32/pic32.h>
-#include "pic32_uart.h"
/* UART name and device definitions */
#define PIC32_DEV_NAME "pic32-uart"
#define PIC32_MAX_UARTS 6
#define PIC32_SDEV_NAME "ttyPIC"
-/* pic32_sport pointer for console use */
-static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+#define PIC32_UART_DFLT_BRATE 9600
+#define PIC32_UART_TX_FIFO_DEPTH 8
+#define PIC32_UART_RX_FIFO_DEPTH 8
+
+#define PIC32_UART_MODE 0x00
+#define PIC32_UART_STA 0x10
+#define PIC32_UART_TX 0x20
+#define PIC32_UART_RX 0x30
+#define PIC32_UART_BRG 0x40
+
+/* struct pic32_sport - pic32 serial port descriptor
+ * @port: uart port descriptor
+ * @idx: port index
+ * @irq_fault: virtual fault interrupt number
+ * @irq_fault_name: irq fault name
+ * @irq_rx: virtual rx interrupt number
+ * @irq_rx_name: irq rx name
+ * @irq_tx: virtual tx interrupt number
+ * @irq_tx_name: irq tx name
+ * @cts_gpio: clear to send gpio
+ * @dev: device descriptor
+ **/
+struct pic32_sport {
+ struct uart_port port;
+ int idx;
+
+ int irq_fault;
+ const char *irq_fault_name;
+ int irq_rx;
+ const char *irq_rx_name;
+ int irq_tx;
+ const char *irq_tx_name;
+ bool enable_tx_irq;
+
+ bool hw_flow_ctrl;
+ int cts_gpio;
+
+ struct clk *clk;
+
+ struct device *dev;
+};
-static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
+static inline struct pic32_sport *to_pic32_sport(struct uart_port *port)
{
- /* wait for tx empty, otherwise chars will be lost or corrupted */
- while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
- udelay(1);
+ return container_of(port, struct pic32_sport, port);
}
-static inline int pic32_enable_clock(struct pic32_sport *sport)
+static inline void pic32_uart_writel(struct pic32_sport *sport,
+ u32 reg, u32 val)
{
- int ret = clk_prepare_enable(sport->clk);
-
- if (ret)
- return ret;
+ __raw_writel(val, sport->port.membase + reg);
+}
- sport->ref_clk++;
- return 0;
+static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
+{
+ return __raw_readl(sport->port.membase + reg);
}
-static inline void pic32_disable_clock(struct pic32_sport *sport)
+/* pic32 uart mode register bits */
+#define PIC32_UART_MODE_ON BIT(15)
+#define PIC32_UART_MODE_FRZ BIT(14)
+#define PIC32_UART_MODE_SIDL BIT(13)
+#define PIC32_UART_MODE_IREN BIT(12)
+#define PIC32_UART_MODE_RTSMD BIT(11)
+#define PIC32_UART_MODE_RESV1 BIT(10)
+#define PIC32_UART_MODE_UEN1 BIT(9)
+#define PIC32_UART_MODE_UEN0 BIT(8)
+#define PIC32_UART_MODE_WAKE BIT(7)
+#define PIC32_UART_MODE_LPBK BIT(6)
+#define PIC32_UART_MODE_ABAUD BIT(5)
+#define PIC32_UART_MODE_RXINV BIT(4)
+#define PIC32_UART_MODE_BRGH BIT(3)
+#define PIC32_UART_MODE_PDSEL1 BIT(2)
+#define PIC32_UART_MODE_PDSEL0 BIT(1)
+#define PIC32_UART_MODE_STSEL BIT(0)
+
+/* pic32 uart status register bits */
+#define PIC32_UART_STA_UTXISEL1 BIT(15)
+#define PIC32_UART_STA_UTXISEL0 BIT(14)
+#define PIC32_UART_STA_UTXINV BIT(13)
+#define PIC32_UART_STA_URXEN BIT(12)
+#define PIC32_UART_STA_UTXBRK BIT(11)
+#define PIC32_UART_STA_UTXEN BIT(10)
+#define PIC32_UART_STA_UTXBF BIT(9)
+#define PIC32_UART_STA_TRMT BIT(8)
+#define PIC32_UART_STA_URXISEL1 BIT(7)
+#define PIC32_UART_STA_URXISEL0 BIT(6)
+#define PIC32_UART_STA_ADDEN BIT(5)
+#define PIC32_UART_STA_RIDLE BIT(4)
+#define PIC32_UART_STA_PERR BIT(3)
+#define PIC32_UART_STA_FERR BIT(2)
+#define PIC32_UART_STA_OERR BIT(1)
+#define PIC32_UART_STA_URXDA BIT(0)
+
+/* pic32_sport pointer for console use */
+static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+
+static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
{
- sport->ref_clk--;
- clk_disable_unprepare(sport->clk);
+ /* wait for tx empty, otherwise chars will be lost or corrupted */
+ while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
+ udelay(1);
}
/* serial core request to check if uart tx buffer is empty */
@@ -117,16 +193,16 @@ static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
*/
static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en)
{
- if (en && !tx_irq_enabled(sport)) {
+ if (en && !sport->enable_tx_irq) {
enable_irq(sport->irq_tx);
- tx_irq_enabled(sport) = 1;
- } else if (!en && tx_irq_enabled(sport)) {
+ sport->enable_tx_irq = true;
+ } else if (!en && sport->enable_tx_irq) {
/* use disable_irq_nosync() and not disable_irq() to avoid self
* imposed deadlock by not waiting for irq handler to end,
* since this callback is called from interrupt context.
*/
disable_irq_nosync(sport->irq_tx);
- tx_irq_enabled(sport) = 0;
+ sport->enable_tx_irq = false;
}
}
@@ -395,7 +471,7 @@ static int pic32_uart_startup(struct uart_port *port)
local_irq_save(flags);
- ret = pic32_enable_clock(sport);
+ ret = clk_prepare_enable(sport->clk);
if (ret) {
local_irq_restore(flags);
goto out_done;
@@ -419,7 +495,7 @@ static int pic32_uart_startup(struct uart_port *port)
* For each irq, request_irq() is called with the interrupt disabled,
* and the irq is enabled as soon as we are ready to handle it.
*/
- tx_irq_enabled(sport) = 0;
+ sport->enable_tx_irq = false;
sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault",
pic32_uart_type(port),
@@ -431,7 +507,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
- sport->irqflags_fault, sport->irq_fault_name, port);
+ IRQF_NO_THREAD, sport->irq_fault_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_fault, ret,
@@ -449,7 +525,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt,
- sport->irqflags_rx, sport->irq_rx_name, port);
+ IRQF_NO_THREAD, sport->irq_rx_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_rx, ret,
@@ -467,7 +543,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt,
- sport->irqflags_tx, sport->irq_tx_name, port);
+ IRQF_NO_THREAD, sport->irq_tx_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_tx, ret,
@@ -488,19 +564,21 @@ static int pic32_uart_startup(struct uart_port *port)
/* enable all interrupts and enable the uart */
pic32_uart_en_and_unmask(port);
+ local_irq_restore(flags);
+
enable_irq(sport->irq_rx);
return 0;
out_t:
- kfree(sport->irq_tx_name);
free_irq(sport->irq_tx, port);
+ kfree(sport->irq_tx_name);
out_r:
- kfree(sport->irq_rx_name);
free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
out_f:
- kfree(sport->irq_fault_name);
free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
out_done:
return ret;
}
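
The reordered error path above (and the matching change in pic32_uart_shutdown() below) follows from request_irq() keeping a reference to the name string it was given: the string may only be freed after free_irq(). A minimal sketch of that pairing, assuming a kernel build context; example_irq_setup()/example_irq_teardown() and the "%s-irq" name format are hypothetical:

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_irq_setup(struct uart_port *port, int irq,
			     irq_handler_t handler, const char **namep)
{
	char *name;
	int ret;

	/* the name must stay allocated for as long as the irq is registered */
	name = kasprintf(GFP_KERNEL, "%s-irq", port->name);
	if (!name)
		return -ENOMEM;

	ret = request_irq(irq, handler, IRQF_NO_THREAD, name, port);
	if (ret) {
		kfree(name);
		return ret;
	}

	*namep = name;
	return 0;
}

static void example_irq_teardown(struct uart_port *port, int irq,
				 const char *name)
{
	/* unregister first, only then free the string it still references */
	free_irq(irq, port);
	kfree(name);
}
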
@@ -515,12 +593,15 @@ static void pic32_uart_shutdown(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
pic32_uart_dsbl_and_mask(port);
spin_unlock_irqrestore(&port->lock, flags);
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
/* free all 3 interrupts for this UART */
free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
free_irq(sport->irq_tx, port);
+ kfree(sport->irq_tx_name);
free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
}
/* serial core request to change current uart setting */
@@ -712,10 +793,9 @@ static void pic32_console_write(struct console *co, const char *s,
unsigned int count)
{
struct pic32_sport *sport = pic32_sports[co->index];
- struct uart_port *port = pic32_get_port(sport);
/* call uart helper to deal with \r\n */
- uart_console_write(port, s, count, pic32_console_putchar);
+ uart_console_write(&sport->port, s, count, pic32_console_putchar);
}
/* console core request to setup given console, find matching uart
@@ -724,7 +804,6 @@ static void pic32_console_write(struct console *co, const char *s,
static int pic32_console_setup(struct console *co, char *options)
{
struct pic32_sport *sport;
- struct uart_port *port = NULL;
int baud = 115200;
int bits = 8;
int parity = 'n';
@@ -737,16 +816,15 @@ static int pic32_console_setup(struct console *co, char *options)
sport = pic32_sports[co->index];
if (!sport)
return -ENODEV;
- port = pic32_get_port(sport);
- ret = pic32_enable_clock(sport);
+ ret = clk_prepare_enable(sport->clk);
if (ret)
return ret;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- return uart_set_options(port, co, baud, parity, bits, flow);
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
static struct uart_driver pic32_uart_driver;
@@ -816,13 +894,9 @@ static int pic32_uart_probe(struct platform_device *pdev)
sport->idx = uart_idx;
sport->irq_fault = irq_of_parse_and_map(np, 0);
- sport->irqflags_fault = IRQF_NO_THREAD;
sport->irq_rx = irq_of_parse_and_map(np, 1);
- sport->irqflags_rx = IRQF_NO_THREAD;
sport->irq_tx = irq_of_parse_and_map(np, 2);
- sport->irqflags_tx = IRQF_NO_THREAD;
sport->clk = devm_clk_get(&pdev->dev, NULL);
- sport->cts_gpio = -EINVAL;
sport->dev = &pdev->dev;
/* Hardware flow control: gpios
@@ -850,7 +924,6 @@ static int pic32_uart_probe(struct platform_device *pdev)
pic32_sports[uart_idx] = sport;
port = &sport->port;
- memset(port, 0, sizeof(*port));
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->ops = &pic32_uart_ops;
@@ -872,7 +945,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
/* The peripheral clock has been enabled by console_setup,
* so disable it till the port is used.
*/
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
}
#endif
@@ -893,7 +966,7 @@ static int pic32_uart_remove(struct platform_device *pdev)
struct pic32_sport *sport = to_pic32_sport(port);
uart_remove_one_port(&pic32_uart_driver, port);
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
platform_set_drvdata(pdev, NULL);
pic32_sports[sport->idx] = NULL;
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
deleted file mode 100644
index b15639cc336b..000000000000
--- a/drivers/tty/serial/pic32_uart.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * PIC32 Integrated Serial Driver.
- *
- * Copyright (C) 2015 Microchip Technology, Inc.
- *
- * Authors:
- * Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- */
-#ifndef __DT_PIC32_UART_H__
-#define __DT_PIC32_UART_H__
-
-#define PIC32_UART_DFLT_BRATE (9600)
-#define PIC32_UART_TX_FIFO_DEPTH (8)
-#define PIC32_UART_RX_FIFO_DEPTH (8)
-
-#define PIC32_UART_MODE 0x00
-#define PIC32_UART_STA 0x10
-#define PIC32_UART_TX 0x20
-#define PIC32_UART_RX 0x30
-#define PIC32_UART_BRG 0x40
-
-struct pic32_console_opt {
- int baud;
- int parity;
- int bits;
- int flow;
-};
-
-/* struct pic32_sport - pic32 serial port descriptor
- * @port: uart port descriptor
- * @idx: port index
- * @irq_fault: virtual fault interrupt number
- * @irqflags_fault: flags related to fault irq
- * @irq_fault_name: irq fault name
- * @irq_rx: virtual rx interrupt number
- * @irqflags_rx: flags related to rx irq
- * @irq_rx_name: irq rx name
- * @irq_tx: virtual tx interrupt number
- * @irqflags_tx: : flags related to tx irq
- * @irq_tx_name: irq tx name
- * @cts_gpio: clear to send gpio
- * @dev: device descriptor
- **/
-struct pic32_sport {
- struct uart_port port;
- struct pic32_console_opt opt;
- int idx;
-
- int irq_fault;
- int irqflags_fault;
- const char *irq_fault_name;
- int irq_rx;
- int irqflags_rx;
- const char *irq_rx_name;
- int irq_tx;
- int irqflags_tx;
- const char *irq_tx_name;
- u8 enable_tx_irq;
-
- bool hw_flow_ctrl;
- int cts_gpio;
-
- int ref_clk;
- struct clk *clk;
-
- struct device *dev;
-};
-#define to_pic32_sport(c) container_of(c, struct pic32_sport, port)
-#define pic32_get_port(sport) (&sport->port)
-#define pic32_get_opt(sport) (&sport->opt)
-#define tx_irq_enabled(sport) (sport->enable_tx_irq)
-
-static inline void pic32_uart_writel(struct pic32_sport *sport,
- u32 reg, u32 val)
-{
- struct uart_port *port = pic32_get_port(sport);
-
- __raw_writel(val, port->membase + reg);
-}
-
-static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
-{
- struct uart_port *port = pic32_get_port(sport);
-
- return __raw_readl(port->membase + reg);
-}
-
-/* pic32 uart mode register bits */
-#define PIC32_UART_MODE_ON BIT(15)
-#define PIC32_UART_MODE_FRZ BIT(14)
-#define PIC32_UART_MODE_SIDL BIT(13)
-#define PIC32_UART_MODE_IREN BIT(12)
-#define PIC32_UART_MODE_RTSMD BIT(11)
-#define PIC32_UART_MODE_RESV1 BIT(10)
-#define PIC32_UART_MODE_UEN1 BIT(9)
-#define PIC32_UART_MODE_UEN0 BIT(8)
-#define PIC32_UART_MODE_WAKE BIT(7)
-#define PIC32_UART_MODE_LPBK BIT(6)
-#define PIC32_UART_MODE_ABAUD BIT(5)
-#define PIC32_UART_MODE_RXINV BIT(4)
-#define PIC32_UART_MODE_BRGH BIT(3)
-#define PIC32_UART_MODE_PDSEL1 BIT(2)
-#define PIC32_UART_MODE_PDSEL0 BIT(1)
-#define PIC32_UART_MODE_STSEL BIT(0)
-
-/* pic32 uart status register bits */
-#define PIC32_UART_STA_UTXISEL1 BIT(15)
-#define PIC32_UART_STA_UTXISEL0 BIT(14)
-#define PIC32_UART_STA_UTXINV BIT(13)
-#define PIC32_UART_STA_URXEN BIT(12)
-#define PIC32_UART_STA_UTXBRK BIT(11)
-#define PIC32_UART_STA_UTXEN BIT(10)
-#define PIC32_UART_STA_UTXBF BIT(9)
-#define PIC32_UART_STA_TRMT BIT(8)
-#define PIC32_UART_STA_URXISEL1 BIT(7)
-#define PIC32_UART_STA_URXISEL0 BIT(6)
-#define PIC32_UART_STA_ADDEN BIT(5)
-#define PIC32_UART_STA_RIDLE BIT(4)
-#define PIC32_UART_STA_PERR BIT(3)
-#define PIC32_UART_STA_FERR BIT(2)
-#define PIC32_UART_STA_OERR BIT(1)
-#define PIC32_UART_STA_URXDA BIT(0)
-
-#endif /* __DT_PIC32_UART_H__ */
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5d97c201ad88..3133446e806c 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -24,7 +24,6 @@
*/
#undef DEBUG
-#undef DEBUG_HARD
#undef USE_CTRL_O_SYSRQ
#include <linux/module.h>
@@ -51,7 +50,6 @@
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
@@ -66,10 +64,6 @@
#include "pmac_zilog.h"
-/* Not yet implemented */
-#undef HAS_DBDMA
-
-static char version[] __initdata = "pmac_zilog: 0.6 (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Mac and PowerMac serial ports.");
MODULE_LICENSE("GPL");
@@ -446,9 +440,6 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
spin_lock(&uap_a->port.lock);
r3 = read_zsreg(uap_a, R3);
-#ifdef DEBUG_HARD
- pmz_debug("irq, r3: %x\n", r3);
-#endif
/* Channel A */
push = false;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
@@ -613,8 +604,6 @@ static void pmz_start_tx(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned char status;
- pmz_debug("pmz: start_tx()\n");
-
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
@@ -636,7 +625,7 @@ static void pmz_start_tx(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit))
- goto out;
+ return;
write_zsdata(uap, xmit->buf[xmit->tail]);
zssync(uap);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -645,8 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
}
- out:
- pmz_debug("pmz: start_tx() done.\n");
}
/*
@@ -659,13 +646,9 @@ static void pmz_stop_rx(struct uart_port *port)
{
struct uart_pmac_port *uap = to_pmz(port);
- pmz_debug("pmz: stop_rx()()\n");
-
/* Disable all RX interrupts. */
uap->curregs[R1] &= ~RxINT_MASK;
pmz_maybe_update_regs(uap);
-
- pmz_debug("pmz: stop_rx() done.\n");
}
/*
@@ -910,8 +893,6 @@ static int pmz_startup(struct uart_port *port)
unsigned long flags;
int pwr_delay = 0;
- pmz_debug("pmz: startup()\n");
-
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
/* A console is never powered down. Else, power up and
@@ -947,8 +928,6 @@ static int pmz_startup(struct uart_port *port)
pmz_interrupt_control(uap, 1);
spin_unlock_irqrestore(&port->lock, flags);
- pmz_debug("pmz: startup() done.\n");
-
return 0;
}
@@ -957,8 +936,6 @@ static void pmz_shutdown(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- pmz_debug("pmz: shutdown()\n");
-
spin_lock_irqsave(&port->lock, flags);
/* Disable interrupt requests for the channel */
@@ -987,8 +964,6 @@ static void pmz_shutdown(struct uart_port *port)
pmz_set_scc_power(uap, 0); /* Shut the chip down */
spin_unlock_irqrestore(&port->lock, flags);
-
- pmz_debug("pmz: shutdown() done.\n");
}
/* Shared by TTY driver and serial console setup. The port lock is held
@@ -1233,10 +1208,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
struct uart_pmac_port *uap = to_pmz(port);
unsigned long baud;
- pmz_debug("pmz: set_termios()\n");
-
- memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
-
/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
* on the IR dongle. Note that the IRTTY driver currently doesn't know
* about the FIR mode and high speed modes. So these are unused. For
@@ -1270,8 +1241,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
pmz_maybe_update_regs(uap);
}
uart_update_timeout(port, termios->c_cflag, baud);
-
- pmz_debug("pmz: set_termios() done.\n");
}
/* The port lock is not held. */
@@ -1400,7 +1369,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
char name[1];
} *slots;
int len;
- struct resource r_ports, r_rxdma, r_txdma;
+ struct resource r_ports;
/*
* Request & map chip registers
@@ -1412,35 +1381,6 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
uap->control_reg = uap->port.membase;
uap->data_reg = uap->control_reg + 0x10;
-
- /*
- * Request & map DBDMA registers
- */
-#ifdef HAS_DBDMA
- if (of_address_to_resource(np, 1, &r_txdma) == 0 &&
- of_address_to_resource(np, 2, &r_rxdma) == 0)
- uap->flags |= PMACZILOG_FLAG_HAS_DMA;
-#else
- memset(&r_txdma, 0, sizeof(struct resource));
- memset(&r_rxdma, 0, sizeof(struct resource));
-#endif
- if (ZS_HAS_DMA(uap)) {
- uap->tx_dma_regs = ioremap(r_txdma.start, 0x100);
- if (uap->tx_dma_regs == NULL) {
- uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
- goto no_dma;
- }
- uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100);
- if (uap->rx_dma_regs == NULL) {
- iounmap(uap->tx_dma_regs);
- uap->tx_dma_regs = NULL;
- uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
- goto no_dma;
- }
- uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
- uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
- }
-no_dma:
/*
* Detect port type
@@ -1506,8 +1446,6 @@ no_dma:
of_device_is_compatible(np->parent->parent, "gatwick")) {
/* IRQs on gatwick are offset by 64 */
uap->port.irq = irq_create_mapping(NULL, 64 + 15);
- uap->tx_dma_irq = irq_create_mapping(NULL, 64 + 4);
- uap->rx_dma_irq = irq_create_mapping(NULL, 64 + 5);
}
/* Setup some valid baud rate information in the register
@@ -1527,8 +1465,6 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
struct device_node *np;
np = uap->node;
- iounmap(uap->rx_dma_regs);
- iounmap(uap->tx_dma_regs);
iounmap(uap->control_reg);
uap->node = NULL;
of_node_put(np);
@@ -1875,7 +1811,6 @@ static struct platform_driver pmz_driver = {
static int __init init_pmz(void)
{
int rc, i;
- printk(KERN_INFO "%s\n", version);
/*
* First, we need to do a direct OF-based probe pass. We
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index fa85b0de5c2f..837b97ca0a90 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -43,7 +43,6 @@ struct uart_pmac_port {
#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
#define PMACZILOG_FLAG_IS_IRDA 0x00000100
#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
-#define PMACZILOG_FLAG_HAS_DMA 0x00000400
#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
#define PMACZILOG_FLAG_IS_OPEN 0x00002000
#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
@@ -55,16 +54,7 @@ struct uart_pmac_port {
volatile u8 __iomem *control_reg;
volatile u8 __iomem *data_reg;
-#ifdef CONFIG_PPC_PMAC
- unsigned int tx_dma_irq;
- unsigned int rx_dma_irq;
- volatile struct dbdma_regs __iomem *tx_dma_regs;
- volatile struct dbdma_regs __iomem *rx_dma_regs;
-#endif
-
unsigned char irq_name[8];
-
- struct ktermios termios_cache;
};
#define to_pmz(p) ((struct uart_pmac_port *)(p))
@@ -377,7 +367,6 @@ static inline void zssync(struct uart_pmac_port *port)
#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & PMACZILOG_FLAG_MODEM_STATUS)
#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
-#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 1543a6028856..4733a233bd0c 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -149,12 +149,6 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
-static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
- 32000000, 48000000, 51200000, 64000000,
- 80000000, 96000000, 100000000,
- 102400000, 112000000, 120000000,
- 128000000};
-
#define to_dev_port(ptr, member) \
container_of(ptr, struct qcom_geni_serial_port, member)
@@ -507,7 +501,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
*/
qcom_geni_serial_poll_tx_done(uport);
- if (uart_circ_chars_pending(&uport->state->xmit)) {
+ if (!uart_circ_empty(&uport->state->xmit)) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
@@ -946,25 +940,43 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_cfg(unsigned long clk_freq)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
- if (!(root_freq[i] % clk_freq))
- return root_freq[i];
- }
- return 0;
-}
-
-static unsigned long get_clk_div_rate(unsigned int baud,
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
unsigned int sampling_rate, unsigned int *clk_div)
{
unsigned long ser_clk;
unsigned long desired_clk;
+ unsigned long freq, prev;
+ unsigned long div, maxdiv;
+ int64_t mult;
desired_clk = baud * sampling_rate;
- ser_clk = get_clk_cfg(desired_clk);
+ if (!desired_clk) {
+ pr_err("%s: Invalid frequency\n", __func__);
+ return 0;
+ }
+
+ maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
+ prev = 0;
+
+ for (div = 1; div <= maxdiv; div++) {
+ mult = div * desired_clk;
+ if (mult > ULONG_MAX)
+ break;
+
+ freq = clk_round_rate(clk, (unsigned long)mult);
+ if (!(freq % desired_clk)) {
+ ser_clk = freq;
+ break;
+ }
+
+ if (!prev)
+ ser_clk = freq;
+ else if (prev == freq)
+ break;
+
+ prev = freq;
+ }
+
if (!ser_clk) {
pr_err("%s: Can't find matching DFS entry for baud %d\n",
__func__, baud);
@@ -972,6 +984,9 @@ static unsigned long get_clk_div_rate(unsigned int baud,
}
*clk_div = ser_clk / desired_clk;
+ if (!(*clk_div))
+ *clk_div = 1;
+
return ser_clk;
}
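
The rewritten get_clk_div_rate() no longer consults a fixed root_freq[] table; it asks clk_round_rate() for successive multiples of the desired clock (baud * sampling rate) and keeps the first rate that divides evenly, falling back to the first answer if the clock stops scaling. A self-contained user-space sketch of the same search; fake_clk_round_rate(), its rate table and the divider bound of 7 are illustrative stand-ins, not values from the driver:

#include <stdio.h>

/* stand-in for clk_round_rate(): returns the smallest supported rate
 * at or above the request (illustrative rate table) */
static unsigned long fake_clk_round_rate(unsigned long rate)
{
	static const unsigned long rates[] = {
		7372800, 14745600, 29491200, 58982400, 117964800,
	};

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] >= rate)
			return rates[i];
	return rates[sizeof(rates) / sizeof(rates[0]) - 1];
}

int main(void)
{
	unsigned long desired = 115200 * 16;	/* baud * sampling rate */
	unsigned long ser_clk = 0, prev = 0;

	for (unsigned long div = 1; div <= 7; div++) {
		unsigned long freq = fake_clk_round_rate(div * desired);

		if (!(freq % desired)) {	/* exact multiple: done */
			ser_clk = freq;
			break;
		}
		if (!prev)
			ser_clk = freq;		/* remember a fallback */
		else if (prev == freq)
			break;			/* clock stopped scaling */
		prev = freq;
	}

	printf("ser_clk=%lu div=%lu\n", ser_clk, ser_clk / desired);
	return 0;
}

With the table above it settles on 7372800 Hz and a divider of 4 for 115200 baud at 16x oversampling.
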
@@ -1003,7 +1018,8 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
if (ver >= QUP_SE_VERSION_2_5)
sampling_rate /= 2;
- clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
+ clk_rate = get_clk_div_rate(port->se.clk, baud,
+ sampling_rate, &clk_div);
if (!clk_rate)
goto out_restart_rx;
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index e5f1fded423a..f556b4955f59 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port,
fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS7;
break;
default:
ctrl |= RDA_UART_DBITS_8;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 5fe6cccfc1ae..e64e42a19d1a 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -446,6 +446,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
+ del_timer_sync(&sport->timer);
+
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
@@ -476,8 +478,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
UTSR1_TO_SM(UTSR1_ROR);
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
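
The sa1100 hunk moves del_timer_sync() ahead of spin_lock_irqsave(). The ordering matters because del_timer_sync() waits for a running timer callback to finish, and a callback that itself takes the port lock could otherwise spin on it forever. A minimal sketch of the rule, assuming a kernel build context; struct example_dev and example_reconfigure() are hypothetical:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct example_dev {
	spinlock_t lock;
	struct timer_list timer;	/* its handler takes 'lock' */
};

static void example_reconfigure(struct example_dev *dev)
{
	unsigned long flags;

	/*
	 * Synchronise with the timer while the lock is NOT held: the
	 * handler may already be running and spinning on dev->lock, so
	 * taking the lock first and then calling del_timer_sync() could
	 * deadlock.
	 */
	del_timer_sync(&dev->timer);

	spin_lock_irqsave(&dev->lock, flags);
	/* ... update state that the timer handler also touches ... */
	spin_unlock_irqrestore(&dev->lock, flags);
}
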
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index e1585fbae909..d5ca904def34 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -2480,12 +2480,24 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+ unsigned long flags;
+ bool locked = true;
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
return;
+ if (cons_uart->sysrq)
+ locked = false;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+ else
+ spin_lock_irqsave(&cons_uart->lock, flags);
+
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
+
+ if (locked)
+ spin_unlock_irqrestore(&cons_uart->lock, flags);
}
/* Shouldn't be __init, as it can be instantiated from other module */
@@ -2814,6 +2826,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.num_clks = 1,
.clksel_mask = 0,
.clksel_shift = 0,
+ .ucon_mask = APPLE_S5L_UCON_MASK,
},
.def_cfg = {
.ucon = APPLE_S5L_UCON_DEFAULT,
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 5fb201c1b563..8472bf70477c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1134,16 +1134,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
if (rs485->flags & SER_RS485_ENABLED) {
- bool rts_during_rx, rts_during_tx;
-
- rts_during_rx = rs485->flags & SER_RS485_RTS_AFTER_SEND;
- rts_during_tx = rs485->flags & SER_RS485_RTS_ON_SEND;
-
- if (rts_during_rx == rts_during_tx)
- dev_err(port->dev,
- "unsupported RTS signalling on_send:%d after_send:%d - exactly one of RS485 RTS flags should be set\n",
- rts_during_tx, rts_during_rx);
-
/*
* RTS signal is handled by HW, its timing can't be influenced.
* However, it's sometimes useful to delay TX even without RTS
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6a8963caf954..9a85b41caa0a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -24,6 +24,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/math64.h>
#include <linux/security.h>
#include <linux/irq.h>
@@ -42,6 +43,11 @@ static struct lock_class_key port_lock_key;
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+/*
+ * Max time with active RTS before/after data is sent.
+ */
+#define RS485_MAX_RTS_DELAY 100 /* msecs */
+
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -333,15 +339,18 @@ void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
- unsigned int size;
+ unsigned int size = tty_get_frame_size(cflag);
+ u64 frame_time;
- size = tty_get_frame_size(cflag) * port->fifosize;
+ frame_time = (u64)size * NSEC_PER_SEC;
+ size *= port->fifosize;
/*
* Figure the timeout to send the above number of bits.
* Add .02 seconds of slop
*/
port->timeout = (HZ * size) / baud + HZ/50;
+ port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
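
uart_update_timeout() now also records port->frame_time, the rounded-up time in nanoseconds needed to send one character frame; uart_wait_until_sent() later derives its polling interval from it. A self-contained user-space check of the arithmetic for plain 8N1 at 115200 baud (DIV_ROUND_UP_ULL mirrors the kernel's DIV64_U64_ROUND_UP for these operand sizes):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* same round-up division as the kernel's DIV64_U64_ROUND_UP() */
#define DIV_ROUND_UP_ULL(x, d) (((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long long baud = 115200;
	unsigned int bits = 10;	/* 8N1: 1 start + 8 data + 1 stop, as
				 * tty_get_frame_size() reports */

	unsigned long long frame_time =
		DIV_ROUND_UP_ULL((unsigned long long)bits * NSEC_PER_SEC, baud);

	printf("frame_time = %llu ns per character\n", frame_time);
	return 0;
}

This prints 86806 ns, roughly 86.8 microseconds per character.
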
@@ -1296,8 +1305,36 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
+ /* pick sane settings if the user hasn't */
+ if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485.flags |= SER_RS485_RTS_ON_SEND;
+ rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
+ }
+
+ if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_before_send);
+ }
+
+ if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_after_send);
+ }
+ /* Return clean padding area to userspace */
+ memset(rs485.padding, 0, sizeof(rs485.padding));
+
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &rs485);
+ if (!ret)
+ port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
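
The sanity check added above rejects configurations in which SER_RS485_RTS_ON_SEND and SER_RS485_RTS_AFTER_SEND are both set or both clear, falling back to RTS_ON_SEND. The double negation makes the "exactly one of the two" test explicit; a self-contained user-space demo using the uapi flag definitions (rts_flags_invalid() is a hypothetical helper):

#include <stdio.h>
#include <linux/serial.h>	/* SER_RS485_* flag definitions (uapi) */

/* true when the RTS configuration is ambiguous: RTS_ON_SEND and
 * RTS_AFTER_SEND are either both set or both clear */
static int rts_flags_invalid(unsigned int flags)
{
	return !(flags & SER_RS485_RTS_ON_SEND) ==
	       !(flags & SER_RS485_RTS_AFTER_SEND);
}

int main(void)
{
	printf("%d\n", rts_flags_invalid(0));				/* 1 */
	printf("%d\n", rts_flags_invalid(SER_RS485_RTS_ON_SEND));	/* 0 */
	printf("%d\n", rts_flags_invalid(SER_RS485_RTS_ON_SEND |
					 SER_RS485_RTS_AFTER_SEND));	/* 1 */
	return 0;
}
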
@@ -1610,24 +1647,24 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
- char_time = (port->timeout - HZ/50) / port->fifosize;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
+ char_time = max(nsecs_to_jiffies(port->frame_time / 5), 1UL);
+
if (timeout && timeout < char_time)
char_time = timeout;
- /*
- * If the transmitter hasn't cleared in twice the approximate
- * amount of time to send the entire FIFO, it probably won't
- * ever clear. This assumes the UART isn't doing flow
- * control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
- * UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
- */
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ if (!uart_cts_enabled(port)) {
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than port->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*port->timeout.
+ */
+ if (timeout == 0 || timeout > 2 * port->timeout)
+ timeout = 2 * port->timeout;
+ }
expire = jiffies + timeout;
@@ -1643,7 +1680,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
- if (time_after(jiffies, expire))
+ if (timeout && time_after(jiffies, expire))
break;
}
uart_port_deref(port);
@@ -2174,15 +2211,23 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
put_device(tty_dev);
- /* Nothing to do if the console is not suspending */
- if (!console_suspend_enabled && uart_console(uport))
+ /*
+ * Nothing to do if the console is not suspending, except
+ * stop_rx to prevent any asynchronous data over the RX line.
+ * When required, RX is restarted by set_termios during the
+ * resume sequence.
+ */
+ if (!console_suspend_enabled && uart_console(uport)) {
+ uport->ops->stop_rx(uport);
goto unlock;
+ }
uport->suspended = 1;
if (tty_port_initialized(port)) {
const struct uart_ops *ops = uport->ops;
int tries;
+ unsigned int mctrl;
tty_port_set_suspended(port, 1);
tty_port_set_initialized(port, 0);
@@ -2190,6 +2235,9 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
ops->set_mctrl(uport, 0);
+ /* save mctrl so it can be restored on resume */
+ mctrl = uport->mctrl;
+ uport->mctrl = 0;
ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
@@ -2203,6 +2251,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
uport->name);
ops->shutdown(uport);
+ uport->mctrl = mctrl;
}
/*
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 2213e6b841d3..228e380db080 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -618,6 +618,8 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
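
Several hunks in this series (rda-uart, serial_txx9, sh-sci, sifive, st-asc, stm32) apply the same fix: when the hardware cannot honor the requested word length, the driver must write the size it actually programmed back into termios->c_cflag so user space sees the effective setting. A minimal self-contained user-space sketch of that write-back for an 8-bit-only UART; fixup_csize() is a hypothetical helper:

#include <stdio.h>
#include <termios.h>

/* Hypothetical helper: the hardware only supports 8-bit words, so
 * force CS8 and report the effective setting back through c_cflag,
 * as the drivers in this series now do. */
static void fixup_csize(struct termios *t)
{
	if ((t->c_cflag & CSIZE) != CS8) {
		t->c_cflag &= ~CSIZE;
		t->c_cflag |= CS8;
	}
}

int main(void)
{
	struct termios t = { .c_cflag = CS5 | CREAD };

	fixup_csize(&t);
	printf("CS8 reported: %s\n",
	       (t.c_cflag & CSIZE) == CS8 ? "yes" : "no");
	return 0;
}
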
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0f9b8bd23500..0075a1420005 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2379,8 +2379,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
int best_clk = -1;
unsigned long flags;
- if ((termios->c_cflag & CSIZE) == CS7)
+ if ((termios->c_cflag & CSIZE) == CS7) {
smr_val |= SCSMR_CHR;
+ } else {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_cflag & PARENB)
smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index f5ac14c384c4..c0869b080cc3 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -148,7 +148,6 @@
* @port: struct uart_port embedded in this struct
* @dev: struct device *
* @ier: shadowed copy of the interrupt enable register
- * @clkin_rate: input clock to the UART IP block.
* @baud_rate: UART serial line rate (e.g., 115200 baud)
* @clk: reference to this device's clock
* @clk_notifier: clock rate change notifier for upstream clock changes
@@ -159,7 +158,6 @@ struct sifive_serial_port {
struct uart_port port;
struct device *dev;
unsigned char ier;
- unsigned long clkin_rate;
unsigned long baud_rate;
struct clk *clk;
struct notifier_block clk_notifier;
@@ -463,7 +461,7 @@ static void __ssp_update_div(struct sifive_serial_port *ssp)
{
u16 div;
- div = DIV_ROUND_UP(ssp->clkin_rate, ssp->baud_rate) - 1;
+ div = DIV_ROUND_UP(ssp->port.uartclk, ssp->baud_rate) - 1;
__ssp_writel(div, SIFIVE_SERIAL_DIV_OFFS, ssp);
}
@@ -648,8 +646,8 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate));
}
- if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) {
- ssp->clkin_rate = cnd->new_rate;
+ if (event == POST_RATE_CHANGE && ssp->port.uartclk != cnd->new_rate) {
+ ssp->port.uartclk = cnd->new_rate;
__ssp_update_div(ssp);
}
@@ -666,19 +664,24 @@ static void sifive_serial_set_termios(struct uart_port *port,
int rate;
char nstop;
- if ((termios->c_cflag & CSIZE) != CS8)
+ if ((termios->c_cflag & CSIZE) != CS8) {
dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_iflag & (INPCK | PARMRK))
dev_err_once(ssp->port.dev, "parity checking not supported\n");
if (termios->c_iflag & BRKINT)
dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+ termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
/* Set number of stop bits */
nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
__ssp_set_stop_bits(ssp, nstop);
/* Set line rate */
- rate = uart_get_baud_rate(port, termios, old, 0, ssp->clkin_rate / 16);
+ rate = uart_get_baud_rate(port, termios, old, 0,
+ ssp->port.uartclk / 16);
__ssp_update_baud_rate(ssp, rate);
spin_lock_irqsave(&ssp->port.lock, flags);
@@ -996,9 +999,8 @@ static int sifive_serial_probe(struct platform_device *pdev)
}
/* Set up clock divider */
- ssp->clkin_rate = clk_get_rate(ssp->clk);
+ ssp->port.uartclk = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
- ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index d7fd692286cf..1b0da603ab54 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -535,10 +535,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* set character length */
if ((cflag & CSIZE) == CS7) {
ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+ cflag |= PARENB;
} else {
ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
ASC_CTL_MODE_8BIT;
+ cflag &= ~CSIZE;
+ cflag |= CS8;
}
+ termios->c_cflag = cflag;
/* set stop bit */
ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 87b5cd4c9743..b7b44f4050d4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -37,6 +37,7 @@
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
@@ -107,8 +108,6 @@ static int stm32_usart_config_rs485(struct uart_port *port,
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- port->rs485 = *rs485conf;
-
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (rs485conf->flags & SER_RS485_ENABLED) {
@@ -128,13 +127,10 @@ static int stm32_usart_config_rs485(struct uart_port *port,
rs485conf->delay_rts_after_send,
baud);
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
cr3 &= ~USART_CR3_DEP;
- rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
+ else
cr3 |= USART_CR3_DEP;
- rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- }
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
@@ -421,6 +417,14 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
+static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
static void stm32_usart_rx_dma_complete(void *arg)
{
struct uart_port *port = arg;
@@ -446,6 +450,50 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
+static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
+static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ }
+}
+
+static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ }
+}
+
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -553,6 +601,13 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
u32 isr;
int ret;
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED) {
+ stm32_port->txdone = false;
+ stm32_usart_tc_interrupt_disable(port);
+ stm32_usart_rs485_rts_enable(port);
+ }
+
if (port->x_char) {
if (stm32_usart_tx_dma_started(stm32_port) &&
stm32_usart_tx_dma_enabled(stm32_port))
@@ -593,8 +648,14 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
stm32_usart_tx_interrupt_disable(port);
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED) {
+ stm32_port->txdone = true;
+ stm32_usart_tc_interrupt_enable(port);
+ }
+ }
}
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
@@ -608,6 +669,13 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
sr = readl_relaxed(port->membase + ofs->isr);
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED &&
+ (sr & USART_SR_TC)) {
+ stm32_usart_tc_interrupt_disable(port);
+ stm32_usart_rs485_rts_disable(port);
+ }
+
if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
@@ -717,44 +785,27 @@ static void stm32_usart_disable_ms(struct uart_port *port)
static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_tx_interrupt_disable(port);
if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- if (rs485conf->flags & SER_RS485_ENABLED) {
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- }
- }
+ stm32_usart_rs485_rts_disable(port);
}
/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
- if (uart_circ_empty(xmit) && !port->x_char)
+ if (uart_circ_empty(xmit) && !port->x_char) {
+ stm32_usart_rs485_rts_disable(port);
return;
-
- if (rs485conf->flags & SER_RS485_ENABLED) {
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- }
}
+ stm32_usart_rs485_rts_enable(port);
+
stm32_usart_transmit_chars(port);
}
@@ -1037,13 +1088,22 @@ static void stm32_usart_set_termios(struct uart_port *port,
* CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
* M0 and M1 already cleared by cr1 initialization.
*/
- if (bits == 9)
+ if (bits == 9) {
cr1 |= USART_CR1_M0;
- else if ((bits == 7) && cfg->has_7bits_data)
+ } else if ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
- else if (bits != 8)
+ } else if (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
, bits);
+ cflag &= ~CSIZE;
+ cflag |= CS8;
+ termios->c_cflag = cflag;
+ bits = 8;
+ if (cflag & PARENB) {
+ bits++;
+ cr1 |= USART_CR1_M0;
+ }
+ }
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
(stm32_port->fifoen &&
@@ -1222,6 +1282,33 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
}
}
+#if defined(CONFIG_CONSOLE_POLL)
+
+ /* Callbacks for polling characters in debug context (e.g. KGDB). */
+static int stm32_usart_poll_init(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+
+ return clk_prepare_enable(stm32_port->clk);
+}
+
+static int stm32_usart_poll_get_char(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
+ return NO_POLL_CHAR;
+
+ return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
+}
+
+static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ stm32_usart_console_putchar(port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static const struct uart_ops stm32_uart_ops = {
.tx_empty = stm32_usart_tx_empty,
.set_mctrl = stm32_usart_set_mctrl,
@@ -1243,6 +1330,11 @@ static const struct uart_ops stm32_uart_ops = {
.request_port = stm32_usart_request_port,
.config_port = stm32_usart_config_port,
.verify_port = stm32_usart_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_init = stm32_usart_poll_init,
+ .poll_get_char = stm32_usart_poll_get_char,
+ .poll_put_char = stm32_usart_poll_put_char,
+#endif /* CONFIG_CONSOLE_POLL */
};
/*
@@ -1640,18 +1732,24 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_SERIAL_STM32_CONSOLE
-static void stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ u32 isr;
+ int ret;
- while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
- cpu_relax();
-
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
+ (isr & USART_SR_TXE), 100,
+ STM32_USART_TIMEOUT_USEC);
+ if (ret != 0) {
+ dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
+ return;
+ }
writel_relaxed(ch, port->membase + ofs->tdr);
}
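The readl_relaxed_poll_timeout_atomic() call above replaces the unbounded busy-wait with a bounded one. Below is a minimal sketch of what the helper does in this configuration, assuming the 100 µs poll interval and the STM32_USART_TIMEOUT_USEC budget from the call site (the real helper tracks elapsed time with ktime; this simplification just counts delays):

static int stm32_wait_for_txe_sketch(struct uart_port *port,
				     const struct stm32_usart_offsets *ofs)
{
	u64 waited_us = 0;

	for (;;) {
		if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)
			return 0;		/* transmit register empty, TDR may be written */
		if (waited_us >= STM32_USART_TIMEOUT_USEC)
			return -ETIMEDOUT;	/* give up instead of hanging the console */
		udelay(100);			/* atomic-safe delay, no sleeping */
		waited_us += 100;
	}
}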
+#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
unsigned int cnt)
{
@@ -1727,6 +1825,57 @@ static struct console stm32_console = {
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */
+#ifdef CONFIG_SERIAL_EARLYCON
+static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+{
+ struct stm32_usart_info *info = port->private_data;
+
+ while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
+ cpu_relax();
+
+ writel_relaxed(ch, port->membase + info->ofs.tdr);
+}
+
+static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
+{
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
+
+ uart_console_write(port, s, count, early_stm32_usart_console_putchar);
+}
+
+static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32h7_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32f7_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32f4_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
+#endif /* CONFIG_SERIAL_EARLYCON */
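These OF_EARLYCON_DECLARE() entries only take effect when early console output is requested: with a device tree whose /chosen/stdout-path points at a node matching one of the compatibles above, passing plain earlycon on the kernel command line is enough, and the named form (for example earlycon=stm32,0x40011000, with a made-up register base) should also be accepted since all three entries register under the "stm32" name.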
+
static struct uart_driver stm32_usart_driver = {
.driver_name = DRIVER_NAME,
.dev_name = STM32_SERIAL_NAME,
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index feab952aec16..ee69c203b926 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -251,6 +251,8 @@ struct stm32_usart_info stm32h7_info = {
#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */
#define TX_BUF_L RX_BUF_L /* dma tx buffer length */
+#define STM32_USART_TIMEOUT_USEC USEC_PER_SEC /* 1s timeout in µs */
+
struct stm32_port {
struct uart_port port;
struct clk *clk;
@@ -269,6 +271,7 @@ struct stm32_port {
bool hw_flow_control;
bool swap; /* swap RX & TX pins */
bool fifoen;
+ bool txdone;
int rxftcfg; /* RX FIFO threshold CFG */
int txftcfg; /* TX FIFO threshold CFG */
bool wakeup_src;
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 9f15922e681b..60c73662f955 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -498,7 +498,7 @@ static const struct uart_ops sunplus_uart_ops = {
};
#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
-struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
+static struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
static void sunplus_uart_console_putchar(struct uart_port *port,
unsigned char ch)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index c31389114b86..fff50b5b82eb 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -798,10 +798,8 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
cval |= UART_LCR_PARITY;
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
/*
* Work around a bug in the Oxford Semiconductor 952 rev B
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 007db67292a2..880e2afbb97b 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -321,7 +321,8 @@ static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
struct uartlite_data *pdata = port->private_data;
/* Set termios to what the hardware supports */
- termios->c_cflag &= ~(BRKINT | CSTOPB | PARENB | PARODD | CSIZE);
+ termios->c_iflag &= ~BRKINT;
+ termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CSIZE);
termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 250a1d888eeb..9e01fe6c0ab8 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -313,41 +313,27 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
static void cdns_uart_handle_tx(void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
+ struct circ_buf *xmit = &port->state->xmit;
unsigned int numbytes;
- if (uart_circ_empty(&port->state->xmit)) {
+ if (uart_circ_empty(xmit)) {
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
- } else {
- numbytes = port->fifosize;
- while (numbytes && !uart_circ_empty(&port->state->xmit) &&
- !(readl(port->membase + CDNS_UART_SR) &
- CDNS_UART_SR_TXFULL)) {
- /*
- * Get the data from the UART circular buffer
- * and write it to the cdns_uart's TX_FIFO
- * register.
- */
- writel(
- port->state->xmit.buf[port->state->xmit.tail],
- port->membase + CDNS_UART_FIFO);
-
- port->icount.tx++;
-
- /*
- * Adjust the tail of the UART buffer and wrap
- * the buffer if it reaches limit.
- */
- port->state->xmit.tail =
- (port->state->xmit.tail + 1) &
- (UART_XMIT_SIZE - 1);
-
- numbytes--;
- }
+ return;
+ }
- if (uart_circ_chars_pending(
- &port->state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
+ numbytes = port->fifosize;
+ while (numbytes && !uart_circ_empty(xmit) &&
+ !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+
+ writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO);
+
+ port->icount.tx++;
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ numbytes--;
}
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
}
/**
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 70969bf9d82c..5bc58591665a 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -981,7 +981,7 @@ static const char *zs_type(struct uart_port *uport)
static void zs_release_port(struct uart_port *uport)
{
iounmap(uport->membase);
- uport->membase = 0;
+ uport->membase = NULL;
release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
}
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 25c558e65ece..9bc2a9265277 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1746,6 +1746,8 @@ static int hdlcdev_init(struct slgt_info *info)
*/
static void hdlcdev_exit(struct slgt_info *info)
{
+ if (!info->netdev)
+ return;
unregister_hdlc_device(info->netdev);
free_netdev(info->netdev);
info->netdev = NULL;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 2884cd638d64..18e623325887 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -232,8 +232,10 @@ static void showacpu(void *dummy)
unsigned long flags;
/* Idle CPUs have no interesting backtrace. */
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(smp_processor_id())) {
+ pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
return;
+ }
raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
@@ -260,10 +262,13 @@ static void sysrq_handle_showallcpus(int key)
if (in_hardirq())
regs = get_irq_regs();
- if (regs) {
- pr_info("CPU%d:\n", smp_processor_id());
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ if (regs)
show_regs(regs);
- }
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
schedule_work(&sysrq_showallcpus);
}
}
@@ -274,6 +279,8 @@ static const struct sysrq_key_op sysrq_showallcpus_op = {
.action_msg = "Show backtrace of all active CPUs",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
+#else
+#define sysrq_showallcpus_op (*(const struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showregs(int key)
@@ -405,6 +412,7 @@ static const struct sysrq_key_op sysrq_moom_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
+#ifdef CONFIG_BLOCK
static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
@@ -415,6 +423,9 @@ static const struct sysrq_key_op sysrq_thaw_op = {
.action_msg = "Emergency Thaw of all frozen filesystems",
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
+#else
+#define sysrq_thaw_op (*(const struct sysrq_key_op *)NULL)
+#endif
static void sysrq_handle_kill(int key)
{
@@ -468,17 +479,9 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
NULL, /* g */
NULL, /* h - reserved for help */
&sysrq_kill_op, /* i */
-#ifdef CONFIG_BLOCK
&sysrq_thaw_op, /* j */
-#else
- NULL, /* j */
-#endif
&sysrq_SAK_op, /* k */
-#ifdef CONFIG_SMP
&sysrq_showallcpus_op, /* l */
-#else
- NULL, /* l */
-#endif
&sysrq_showmem_op, /* m */
&sysrq_unrt_op, /* n */
/* o: This will often be registered as 'Off' at init time */
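The fallback #defines added earlier make this table trimming possible: when CONFIG_BLOCK or CONFIG_SMP is off, sysrq_thaw_op or sysrq_showallcpus_op expands to (*(const struct sysrq_key_op *)NULL), and taking its address in the table yields a plain NULL pointer (the & and * cancel without any dereference), so the entries behave exactly as the removed #else NULL branches did.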
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index d903e111dbcb..3cd99ed7c710 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -61,11 +61,10 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
cbaud = termios->c_cflag & CBAUD;
-#ifdef BOTHER
/* Magic token for arbitrary speed via c_ispeed/c_ospeed */
if (cbaud == BOTHER)
return termios->c_ospeed;
-#endif
+
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
@@ -92,16 +91,15 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
speed_t tty_termios_input_baud_rate(struct ktermios *termios)
{
-#ifdef IBSHIFT
unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
if (cbaud == B0)
return tty_termios_baud_rate(termios);
-#ifdef BOTHER
+
/* Magic token for arbitrary speed via c_ispeed*/
if (cbaud == BOTHER)
return termios->c_ispeed;
-#endif
+
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
@@ -111,9 +109,6 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
-#else /* IBSHIFT */
- return tty_termios_baud_rate(termios);
-#endif /* IBSHIFT */
}
EXPORT_SYMBOL(tty_termios_input_baud_rate);
@@ -153,11 +148,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
termios->c_ispeed = ibaud;
termios->c_ospeed = obaud;
-#ifdef IBSHIFT
if (((termios->c_cflag >> IBSHIFT) & CBAUD) != B0)
ibinput = 1; /* An input speed was specified */
-#endif
-#ifdef BOTHER
+
/* If the user asked for a precise weird speed give a precise weird
* answer. If they asked for a Bfoo speed they may have problems
* digesting non-exact replies so fuzz a bit.
@@ -170,11 +163,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
}
if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
iclose = 0;
-#endif
+
termios->c_cflag &= ~CBAUD;
-#ifdef IBSHIFT
termios->c_cflag &= ~(CBAUD << IBSHIFT);
-#endif
/*
* Our goal is to find a close match to the standard baud rate
@@ -194,22 +185,16 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
/* For the case input == output don't set IBAUD bits
* if the user didn't do so.
*/
- if (ofound == i && !ibinput)
+ if (ofound == i && !ibinput) {
ifound = i;
-#ifdef IBSHIFT
- else {
+ } else {
ifound = i;
termios->c_cflag |= (baud_bits[i] << IBSHIFT);
}
-#endif
}
} while (++i < n_baud_table);
- /*
- * If we found no match then use BOTHER if provided or warn
- * the user their platform maintainer needs to wake up if not.
- */
-#ifdef BOTHER
+ /* If we found no match then use BOTHER. */
if (ofound == -1)
termios->c_cflag |= BOTHER;
/* Set exact input bits only if the input and output differ or the
@@ -217,10 +202,6 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
*/
if (ifound == -1 && (ibaud != obaud || ibinput))
termios->c_cflag |= (BOTHER << IBSHIFT);
-#else
- if (ifound == -1 || ofound == -1)
- pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n");
-#endif
}
EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
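With the BOTHER and IBSHIFT conditionals gone, every architecture now takes the arbitrary-speed paths above. A minimal user-space sketch of requesting such a rate, assuming a hypothetical device path and rate (leaving the IBSHIFT bits at B0 makes the input speed follow the output speed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, BOTHER, TCGETS2/TCSETS2 */

static int set_arbitrary_baud(const char *dev, unsigned int rate)
{
	struct termios2 tio;
	int ret, fd = open(dev, O_RDWR | O_NOCTTY);	/* e.g. "/dev/ttySTM0" */

	if (fd < 0)
		return -1;
	ret = ioctl(fd, TCGETS2, &tio);
	if (!ret) {
		tio.c_cflag &= ~CBAUD;
		tio.c_cflag |= BOTHER;	/* "magic token" for an exact rate */
		tio.c_ospeed = rate;	/* e.g. 74880 */
		ret = ioctl(fd, TCSETS2, &tio);
	}
	close(fd);
	return ret;
}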
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 63181925ec1a..adae687f654b 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -562,10 +562,8 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
termios.c_cc[VKILL] = tmp.sg_kill;
set_sgflags(&termios, tmp.sg_flags);
/* Try and encode into Bfoo format */
-#ifdef BOTHER
tty_termios_encode_baud_rate(&termios, termios.c_ispeed,
termios.c_ospeed);
-#endif
up_write(&tty->termios_rwsem);
tty_set_termios(tty, &termios);
return 0;
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 80b86a7992b5..0d04287da098 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -215,8 +215,8 @@ int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
spin_unlock_irq(&p->sighand->siglock);
continue;
}
- __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
- __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ send_signal_locked(SIGHUP, SEND_SIG_PRIV, p, PIDTYPE_TGID);
+ send_signal_locked(SIGCONT, SEND_SIG_PRIV, p, PIDTYPE_TGID);
put_pid(p->signal->tty_old_pgrp); /* A noop */
spin_lock(&tty->ctrl.lock);
tty_pgrp = get_pid(tty->ctrl.pgrp);
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
new file mode 100644
index 000000000000..90226f72c158
--- /dev/null
+++ b/drivers/ufs/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# UFS subsystem configuration
+#
+
+menuconfig SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller"
+ depends on SCSI && SCSI_DMA
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select NLS
+ help
+ Enables support for UFS (Universal Flash Storage) host controllers.
+ A UFS host controller is an electronic component that is able to
+ communicate with a UFS card. UFS host controllers occur in
+ smartphones, laptops, digital cameras and also in cars.
+ The kernel module will be called ufshcd.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.rst>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
+
+if SCSI_UFSHCD
+
+source "drivers/ufs/core/Kconfig"
+
+source "drivers/ufs/host/Kconfig"
+
+endif
diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile
new file mode 100644
index 000000000000..5a199ef18d4c
--- /dev/null
+++ b/drivers/ufs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# The link order is important here. ufshcd-core must initialize
+# before vendor drivers.
+obj-$(CONFIG_SCSI_UFSHCD) += core/ host/
diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig
new file mode 100644
index 000000000000..e11978171403
--- /dev/null
+++ b/drivers/ufs/core/Kconfig
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Kernel configuration file for the UFS Host Controller core.
+#
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+config SCSI_UFS_BSG
+ bool "Universal Flash Storage BSG device node"
+ select BLK_DEV_BSGLIB
+ help
+ Universal Flash Storage (UFS) is a SCSI transport specification for
+ accessing flash storage on digital cameras, mobile phones and
+ consumer electronic devices.
+ A UFS controller communicates with a UFS device by exchanging
+ UFS Protocol Information Units (UPIUs).
+ UPIUs can not only be used as a transport layer for the SCSI protocol
+ but are also used by the UFS native command set.
+ This transport driver supports exchanging UFS protocol information units
+ with a UFS device. See also the ufshcd driver, which is a SCSI driver
+ that supports UFS devices.
+
+ Select this if you need a bsg device node for your UFS controller.
+ If unsure, say N.
+
+config SCSI_UFS_CRYPTO
+ bool "UFS Crypto Engine Support"
+ depends on BLK_INLINE_ENCRYPTION
+ help
+ Enable Crypto Engine Support in UFS.
+ Enabling this makes it possible for the kernel to use the crypto
+ capabilities of the UFS device (if present) to perform crypto
+ operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+ bool "Support UFS Host Performance Booster"
+ help
+ The UFS HPB feature improves random read performance. It caches the
+ L2P (logical to physical) map of the UFS device in host DRAM. The driver
+ issues the HPB READ command, piggybacking the physical page number, to
+ bypass the FTL (flash translation layer) L2P address translation.
+
+config SCSI_UFS_FAULT_INJECTION
+ bool "UFS Fault Injection Support"
+ depends on FAULT_INJECTION
+ help
+ Enable fault injection support in the UFS driver. This makes it easier
+ to test the UFS error handler and abort handler.
+
+config SCSI_UFS_HWMON
+ bool "UFS Temperature Notification"
+ depends on SCSI_UFSHCD=HWMON || HWMON=y
+ help
+ This provides support for UFS hardware monitoring. If enabled,
+ a hardware monitoring device will be created for the UFS device.
+
+ If unsure, say N.
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
new file mode 100644
index 000000000000..62f38c5bf857
--- /dev/null
+++ b/drivers/ufs/core/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c
index c10a8f09682b..e3baed6c70bd 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/ufs/core/ufs-debugfs.c
@@ -4,7 +4,7 @@
#include <linux/debugfs.h>
#include "ufs-debugfs.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static struct dentry *ufs_debugfs_root;
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/ufs/core/ufs-debugfs.h
index 97548a3f90eb..97548a3f90eb 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/ufs/core/ufs-debugfs.h
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 7ac7c4e7ff83..7ac7c4e7ff83 100644
--- a/drivers/scsi/ufs/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h
index 6d0cd8e10c87..6d0cd8e10c87 100644
--- a/drivers/scsi/ufs/ufs-fault-injection.h
+++ b/drivers/ufs/core/ufs-fault-injection.h
diff --git a/drivers/scsi/ufs/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index c38d9d98a86d..4c6a872b7a7c 100644
--- a/drivers/scsi/ufs/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -7,7 +7,7 @@
#include <linux/hwmon.h>
#include <linux/units.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
struct ufs_hwmon_data {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 8a3c6442f291..0a088b47d557 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -6,7 +6,7 @@
#include <linux/bitfield.h>
#include <asm/unaligned.h>
-#include "ufs.h"
+#include <ufs/ufs.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h
index 8d94af3b8077..8d94af3b8077 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/ufs/core/ufs-sysfs.h
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 9e9b93867cab..b99e3f3dc4ef 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -9,7 +9,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/ufs/core/ufs_bsg.h
index 57712d2656d2..57712d2656d2 100644
--- a/drivers/scsi/ufs/ufs_bsg.h
+++ b/drivers/ufs/core/ufs_bsg.h
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 67402baf6fae..198360fe5e8e 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -3,7 +3,7 @@
* Copyright 2019 Google LLC
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"
/* Blk-crypto modes supported by UFS crypto */
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 9f98f18f9646..504cc841540b 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -7,9 +7,9 @@
#define _UFSHCD_CRYPTO_H
#include <scsi/scsi_cmnd.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
-#include "ufshci.h"
+#include <ufs/ufshci.h>
#ifdef CONFIG_SCSI_UFS_CRYPTO
diff --git a/drivers/scsi/ufs/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 38bc77d3dbbd..ffb01fc6de75 100644
--- a/drivers/scsi/ufs/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -4,7 +4,7 @@
#define _UFSHCD_PRIV_H_
#include <linux/pm_runtime.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 1fb3a8b9b03e..01fb4bad86be 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -26,8 +26,8 @@
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
@@ -8445,10 +8445,7 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
- if (info)
- return ufshcd_get_vreg(hba->dev, info->vdd_hba);
-
- return 0;
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 8882b47f76d3..de2bb8401bc4 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -17,7 +17,7 @@
#include "ufshcd-priv.h"
#include "ufshpb.h"
-#include "../sd.h"
+#include "../../scsi/sd.h"
#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define READ_TO_MS 1000
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
req->timeout = 0;
req->end_io_data = umap_req;
+ req->end_io = ufshpb_umap_req_compl_fn;
ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+ blk_execute_rq_nowait(req, true);
hpb->stats.umap_req_cnt++;
}
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
blk_rq_append_bio(req, map_req->bio);
req->end_io_data = map_req;
+ req->end_io = ufshpb_map_req_compl_fn;
if (unlikely(last))
mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
map_req->rb.srgn_idx, mem_size);
scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+ blk_execute_rq_nowait(req, true);
hpb->stats.map_req_cnt++;
return 0;
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/ufs/core/ufshpb.h
index 0d6e6004d783..0d6e6004d783 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/ufs/core/ufshpb.h
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/ufs/host/Kconfig
index 393b9a01da36..82590224da13 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
#
-# Kernel configuration file for the UFS Host Controller
+# Kernel configuration file for the UFS host controller drivers.
#
# Copyright (C) 2011-2013 Samsung India Software Operations
#
@@ -8,26 +8,6 @@
# Santosh Yaraganavi <santosh.sy@samsung.com>
# Vinayak Holikatti <h.vinayak@samsung.com>
-config SCSI_UFSHCD
- tristate "Universal Flash Storage Controller Driver Core"
- depends on SCSI && SCSI_DMA
- select PM_DEVFREQ
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select NLS
- help
- This selects the support for UFS devices in Linux, say Y and make
- sure that you know the name of your UFS host adapter (the card
- inside your computer that "speaks" the UFS protocol, also
- called UFS Host Controller), because you will be asked for it.
- The module will be called ufshcd.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.rst>.
- However, do not compile this as a module if your root file system
- (the one containing the directory /) is located on a UFS device.
-
-if SCSI_UFSHCD
-
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on PCI
@@ -122,24 +102,6 @@ config SCSI_UFS_TI_J721E
Selects this if you have TI platform with UFS controller.
If unsure, say N.
-config SCSI_UFS_BSG
- bool "Universal Flash Storage BSG device node"
- select BLK_DEV_BSGLIB
- help
- Universal Flash Storage (UFS) is SCSI transport specification for
- accessing flash storage on digital cameras, mobile phones and
- consumer electronic devices.
- A UFS controller communicates with a UFS device by exchanging
- UFS Protocol Information Units (UPIUs).
- UPIUs can not only be used as a transport layer for the SCSI protocol
- but are also used by the UFS native command set.
- This transport driver supports exchanging UFS protocol information units
- with a UFS device. See also the ufshcd driver, which is a SCSI driver
- that supports UFS devices.
-
- Select this if you need a bsg device node for your UFS controller.
- If unsure, say N.
-
config SCSI_UFS_EXYNOS
tristate "Exynos specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
@@ -150,38 +112,3 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N.
-
-config SCSI_UFS_CRYPTO
- bool "UFS Crypto Engine Support"
- depends on BLK_INLINE_ENCRYPTION
- help
- Enable Crypto Engine Support in UFS.
- Enabling this makes it possible for the kernel to use the crypto
- capabilities of the UFS device (if present) to perform crypto
- operations on data being transferred to/from the device.
-
-config SCSI_UFS_HPB
- bool "Support UFS Host Performance Booster"
- help
- The UFS HPB feature improves random read performance. It caches
- L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
- read command by piggybacking physical page number for bypassing FTL (flash
- translation layer)'s L2P address translation.
-
-config SCSI_UFS_FAULT_INJECTION
- bool "UFS Fault Injection Support"
- depends on FAULT_INJECTION
- help
- Enable fault injection support in the UFS driver. This makes it easier
- to test the UFS error handler and abort handler.
-
-config SCSI_UFS_HWMON
- bool "UFS Temperature Notification"
- depends on SCSI_UFSHCD=HWMON || HWMON=y
- help
- This provides support for UFS hardware monitoring. If enabled,
- a hardware monitoring device will be created for the UFS device.
-
- If unsure, say N.
-
-endif
diff --git a/drivers/scsi/ufs/Makefile b/drivers/ufs/host/Makefile
index 966048875b50..e4be54273c98 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -1,16 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# UFSHCD makefile
-
-# The link order is important here. ufshcd-core must initialize
-# before vendor drivers.
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
-ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index e05c0ae64eea..e05c0ae64eea 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c
index e635c211c783..92b8ad4b58fe 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/ufs/host/tc-dwc-g210-pci.c
@@ -7,7 +7,7 @@
* Authors: Joao Pinto <jpinto@synopsys.com>
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-dwc.h"
#include "tc-dwc-g210.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
index f15a84d0c176..f15a84d0c176 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c
index 7ef67c9fc5b8..deb93dbd83a4 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/ufs/host/tc-dwc-g210.h
index f7154012f5c7..f7154012f5c7 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.h
+++ b/drivers/ufs/host/tc-dwc-g210.h
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 122d650d0810..122d650d0810 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index ddb2d42605c5..a81d8cbd542f 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -18,10 +18,10 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "ufshci.h"
-#include "unipro.h"
+#include <ufs/ufshci.h>
+#include <ufs/unipro.h>
#include "ufs-exynos.h"
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 0b0a3d530ca6..0b0a3d530ca6 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 7046143063ee..2eed13bc82ca 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -15,12 +15,12 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#include "ufs-hisi.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/ufs/host/ufs-hisi.h
index 5a90c0f4e90c..5a90c0f4e90c 100644
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ b/drivers/ufs/host/ufs-hisi.h
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
index 895e82ea6ece..7e010848dc99 100644
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
#define TRACE_INCLUDE_FILE ufs-mediatek-trace
#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 083d6bd4d561..beabc3ccd30b 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -21,10 +21,10 @@
#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
#include "ufs-mediatek.h"
#define CREATE_TRACE_POINTS
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 414dca86c09f..414dca86c09f 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c
index 745e48ec598f..745e48ec598f 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/ufs/host/ufs-qcom-ice.c
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 4dcb232facaa..f10d4668814c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -15,12 +15,12 @@
#include <linux/reset-controller.h>
#include <linux/devfreq.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#include "ufs-qcom.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 771bc95d02c7..44466a395bb5 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -7,7 +7,7 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
index a57973c8d2a1..e28a67e1e314 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
index 43b70794e24f..ad91ea56662c 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.h
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -10,7 +10,7 @@
#ifndef _UFSHCD_DWC_H
#define _UFSHCD_DWC_H
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
struct ufshcd_dme_attr_val {
u32 attr_sel;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 20af2fbc3af1..04166bda41da 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -9,7 +9,7 @@
* Vinayak Holikatti <h.vinayak@samsung.com>
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index f5313f407617..e7332cc65b1f 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index c33e28ac6ef6..43c2e412bd99 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -5,7 +5,7 @@
#ifndef UFSHCD_PLTFRM_H_
#define UFSHCD_PLTFRM_H_
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#define UFS_PWM_MODE 1
#define UFS_HS_MODE 2
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/ufs/host/ufshci-dwc.h
index 6c290e272106..6c290e272106 100644
--- a/drivers/scsi/ufs/ufshci-dwc.h
+++ b/drivers/ufs/host/ufshci-dwc.h
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 89c0fc7b0cbc..8f39cc8bb034 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -45,9 +45,11 @@ static int uio_dfl_probe(struct dfl_device *ddev)
}
#define FME_FEATURE_ID_ETH_GROUP 0x10
+#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
+ { FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
{ }
};
MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index e3a49d837609..362217189ef3 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1091,7 +1091,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride));
/* rx buffer size must be a positive multiple of the endpoint maxpacket */
- maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint, 0);
+ maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint);
if ((maxpacket < 1) || (maxpacket > UDSL_MAX_BUF_SIZE)) {
dev_err(dev, "%s: invalid endpoint %02x!\n", __func__,
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 53838e7d4eef..6db5cb1b2dbb 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -189,14 +189,12 @@ static int c67x00_drv_remove(struct platform_device *pdev)
c67x00_ll_release(c67x00);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res)
- free_irq(res->start, c67x00);
+ free_irq(res->start, c67x00);
iounmap(c67x00->hpi.base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
kfree(c67x00);
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index c7d3e907be81..a09fa68a6ce7 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -655,7 +655,7 @@ static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
usb_pipeout(urb->pipe));
remaining = urb->transfer_buffer_length - urb->actual_length;
- maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ maxps = usb_maxpacket(urb->dev, urb->pipe);
need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
usb_pipeout(urb->pipe) && !(remaining % maxps);
@@ -866,7 +866,7 @@ static inline int c67x00_end_of_data(struct c67x00_td *td)
if (unlikely(!act_bytes))
return 1; /* This was an empty packet */
- maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
+ maxps = usb_maxpacket(td_udev(td), td->pipe);
if (unlikely(act_bytes < maxps))
return 1; /* Smaller than full packet */
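The usbatm and c67x00 hunks above are mechanical fallout of usb_maxpacket() losing its third parameter; the direction is now decoded from the pipe value itself. Side by side, using the names from this function:

	/* old: caller spelled out the direction */
	maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
	/* new: direction comes from the pipe encoding */
	maxps = usb_maxpacket(td_udev(td), td->pipe);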
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d6d515d598dc..5c15c48952a6 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2038,7 +2038,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
u8 mult = 0;
int ret;
- buffering = CDNS3_EP_BUF_SIZE - 1;
+ buffering = priv_dev->ep_buf_size - 1;
cdns3_configure_dmult(priv_dev, priv_ep);
@@ -2057,7 +2057,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
break;
default:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
- mult = CDNS3_EP_ISO_HS_MULT - 1;
+ mult = priv_dev->ep_iso_burst - 1;
buffering = mult + 1;
}
@@ -2073,14 +2073,14 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
mult = 0;
max_packet_size = 1024;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
- maxburst = CDNS3_EP_ISO_SS_BURST - 1;
+ maxburst = priv_dev->ep_iso_burst - 1;
buffering = (mult + 1) *
(maxburst + 1);
if (priv_ep->interval > 1)
buffering++;
} else {
- maxburst = CDNS3_EP_BUF_SIZE - 1;
+ maxburst = priv_dev->ep_buf_size - 1;
}
break;
default:
@@ -2095,6 +2095,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
else
priv_ep->trb_burst_size = 16;
+ mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
/* onchip buffer is only allocated before configuration */
if (!priv_dev->hw_configured_flag) {
ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
@@ -2961,6 +2965,40 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
return 0;
}
+/**
+ * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
+ * @gadget: pointer to the USB gadget
+ *
+ * Used to record the maximum number of endpoints being used in a USB composite
+ * device (across all configurations). This is used in the calculation of the
+ * TXFIFO sizes when resizing internal memory for individual endpoints. It
+ * helps ensure that the resizing logic reserves enough space for at least
+ * one max packet.
+ */
+static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct usb_ep *ep;
+ int n_in = 0;
+ int total;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ if (ep->claimed && (ep->address & USB_DIR_IN))
+ n_in++;
+ }
+
+ /* 2KB are reserved for EP0, 1KB for out */
+ total = 2 + n_in + 1;
+
+ if (total > priv_dev->onchip_buffers)
+ return -ENOMEM;
+
+ priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+ (priv_dev->onchip_buffers - 2) / (n_in + 1);
+
+ return 0;
+}
+
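As a worked example with assumed figures (not from any datasheet): on a controller reporting onchip_buffers = 16 KB with three claimed IN endpoints, the check passes since 2 + 3 + 1 = 6 <= 16, and ep_buf_size = ep_iso_burst = (16 - 2) / (3 + 1) = 3 KB per endpoint after integer division.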
static const struct usb_gadget_ops cdns3_gadget_ops = {
.get_frame = cdns3_gadget_get_frame,
.wakeup = cdns3_gadget_wakeup,
@@ -2969,6 +3007,7 @@ static const struct usb_gadget_ops cdns3_gadget_ops = {
.udc_start = cdns3_gadget_udc_start,
.udc_stop = cdns3_gadget_udc_stop,
.match_ep = cdns3_gadget_match_ep,
+ .check_config = cdns3_gadget_check_config,
};
static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index c5660f2c4293..fbe4a8e3aa89 100644
--- a/drivers/usb/cdns3/cdns3-gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
@@ -562,15 +562,18 @@ struct cdns3_usb_regs {
/* Max burst size (used only in SS mode). */
#define EP_CFG_MAXBURST_MASK GENMASK(11, 8)
#define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK)
+#define EP_CFG_MAXBURST_MAX 15
/* ISO max burst. */
#define EP_CFG_MULT_MASK GENMASK(15, 14)
#define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK)
+#define EP_CFG_MULT_MAX 2
/* ISO max burst. */
#define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16)
#define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
/* Max number of buffered packets. */
#define EP_CFG_BUFFERING_MASK GENMASK(31, 27)
#define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK)
+#define EP_CFG_BUFFERING_MAX 15
/* EP_CMD - bitmasks */
/* Endpoint reset. */
@@ -1094,9 +1097,6 @@ struct cdns3_trb {
#define CDNS3_ENDPOINTS_MAX_COUNT 32
#define CDNS3_EP_ZLP_BUF_SIZE 1024
-#define CDNS3_EP_BUF_SIZE 4 /* KB */
-#define CDNS3_EP_ISO_HS_MULT 3
-#define CDNS3_EP_ISO_SS_BURST 3
#define CDNS3_MAX_NUM_DESCMISS_BUF 32
#define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */
#define CDNS3_WA2_NUM_BUFFERS 128
@@ -1333,6 +1333,9 @@ struct cdns3_device {
/*in KB */
u16 onchip_buffers;
u16 onchip_used_size;
+
+ u16 ep_buf_size;
+ u16 ep_iso_burst;
};
void cdns3_set_register_bit(void __iomem *ptr, u32 mask);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 3aa7f0a3ad71..d26ecd15be60 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -8,14 +8,6 @@
*/
/*
- * CMSPAR, some architectures can't have space and mark parity.
- */
-
-#ifndef CMSPAR
-#define CMSPAR 0
-#endif
-
-/*
* Major and minor numbers.
*/
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d8b0041de612..2c14a9636056 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -228,8 +228,6 @@ static char *usb_dump_interface(int speed, char *start, char *end,
start = usb_dump_interface_descriptor(start, end, intfc, iface, setno);
for (i = 0; i < desc->desc.bNumEndpoints; i++) {
- if (start > end)
- return start;
start = usb_dump_endpoint_descriptor(speed,
start, end, &desc->endpoint[i].desc);
}
@@ -302,8 +300,6 @@ static char *usb_dump_config(int speed, char *start, char *end,
intfc = config->intf_cache[i];
interface = config->interface[i];
for (j = 0; j < intfc->num_altsetting; j++) {
- if (start > end)
- return start;
start = usb_dump_interface(speed,
start, end, intfc, interface, j);
}
@@ -369,19 +365,11 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
{
int i;
- if (start > end)
- return start;
-
start = usb_dump_device_descriptor(start, end, &dev->descriptor);
- if (start > end)
- return start;
-
start = usb_dump_device_strings(start, end, dev);
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
- if (start > end)
- return start;
start = usb_dump_config(dev->speed,
start, end, dev->config + i,
/* active ? */
@@ -390,41 +378,6 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
return start;
}
-
-#ifdef PROC_EXTRA /* TBD: may want to add this code later */
-
-static char *usb_dump_hub_descriptor(char *start, char *end,
- const struct usb_hub_descriptor *desc)
-{
- int leng = USB_DT_HUB_NONVAR_SIZE;
- unsigned char *ptr = (unsigned char *)desc;
-
- if (start > end)
- return start;
- start += sprintf(start, "Interface:");
- while (leng && start <= end) {
- start += sprintf(start, " %02x", *ptr);
- ptr++; leng--;
- }
- *start++ = '\n';
- return start;
-}
-
-static char *usb_dump_string(char *start, char *end,
- const struct usb_device *dev, char *id, int index)
-{
- if (start > end)
- return start;
- start += sprintf(start, "Interface:");
- if (index <= dev->maxstring && dev->stringindex &&
- dev->stringindex[index])
- start += sprintf(start, "%s: %.100s ", id,
- dev->stringindex[index]);
- return start;
-}
-
-#endif /* PROC_EXTRA */
-
/*****************************************************************/
/* This is a recursive function. Parameters:
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 355ed33a2179..b87452e22835 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1533,22 +1533,23 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
{
int w;
- /* Remote wakeup is needed only when we actually go to sleep.
- * For things like FREEZE and QUIESCE, if the device is already
- * autosuspended then its current wakeup setting is okay.
+ /*
+ * For FREEZE/QUIESCE, disable remote wakeups so no interrupts get
+ * generated.
*/
if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
- if (udev->state != USB_STATE_SUSPENDED)
- udev->do_remote_wakeup = 0;
- return;
- }
+ w = 0;
- /* Enable remote wakeup if it is allowed, even if no interface drivers
- * actually want it.
- */
- w = device_may_wakeup(&udev->dev);
+ } else {
+ /*
+ * Enable remote wakeup if it is allowed, even if no interface
+ * drivers actually want it.
+ */
+ w = device_may_wakeup(&udev->dev);
+ }
- /* If the device is autosuspended with the wrong wakeup setting,
+ /*
+ * If the device is autosuspended with the wrong wakeup setting,
* autoresume now so the setting can be changed.
*/
if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 8176bc81a635..482dae72ef1c 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -15,7 +15,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/prom.h>
#endif
#include "usb.h"
@@ -616,10 +615,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = check_root_hub_suspended,
+ .freeze = hcd_pci_suspend,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
- .thaw = NULL,
+ .thaw = hcd_pci_resume,
.poweroff = hcd_pci_suspend,
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d9712c2602af..06eea8848ccc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2816,6 +2816,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2976,13 +2977,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
/* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
@@ -3020,6 +3034,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -3030,6 +3045,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3039,7 +3055,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1460857026e0..68e9121c1878 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1635,7 +1635,7 @@ static int hub_configure(struct usb_hub *hub,
* maxpktsize is defined in hcd.c's fake endpoint descriptors
* to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(hdev, pipe);
if (maxp > sizeof(*hub->buffer))
maxp = sizeof(*hub->buffer);
@@ -5511,7 +5511,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* Handle notifying userspace about hub over-current events */
static void port_over_current_notify(struct usb_port *port_dev)
{
- char *envp[3];
+ char *envp[3] = { NULL, NULL, NULL };
struct device *hub_dev;
char *port_dev_path;
@@ -5528,20 +5528,18 @@ static void port_over_current_notify(struct usb_port *port_dev)
envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
if (!envp[0])
- goto exit_path;
+ goto exit;
envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
port_dev->over_current_count);
if (!envp[1])
goto exit;
- envp[2] = NULL;
kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
- kfree(envp[1]);
exit:
+ kfree(envp[1]);
kfree(envp[0]);
-exit_path:
kfree(port_dev_path);
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 97b44a68668a..f99a65a64588 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -510,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* DELL USB GEN2 */
+ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+
/* VCOM device */
{ USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index bb1da35eb891..d4dcaefd0ea4 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -205,8 +205,11 @@ usb_acpi_find_companion_for_device(struct usb_device *udev)
struct usb_hub *hub;
if (!udev->parent) {
- /* root hub is only child (_ADR=0) under its parent, the HC */
- adev = ACPI_COMPANION(udev->dev.parent);
+ /*
+ * root hub is only child (_ADR=0) under its parent, the HC.
+ * sysdev pointer is the HC as seen from firmware.
+ */
+ adev = ACPI_COMPANION(udev->bus->sysdev);
return acpi_find_child_device(adev, 0, false);
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index cf0bcd0dc320..dc4fc72ab1b6 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1153,6 +1153,7 @@ static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ u32 otgctl;
int retval = 0;
if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -1187,6 +1188,14 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
+ if (!hsotg->params.activate_ingenic_overcurrent_detection) {
+ if (dwc2_is_host_mode(hsotg)) {
+ otgctl = readl(hsotg->regs + GOTGCTL);
+ otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
+ writel(otgctl, hsotg->regs + GOTGCTL);
+ }
+ }
+
return retval;
}
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 88c337bf564f..0683852e47e4 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -426,6 +426,10 @@ enum dwc2_ep0_state {
* detection using GGPIO register.
* 0 - Deactivate the external level detection (default)
* 1 - Activate the external level detection
+ * @activate_ingenic_overcurrent_detection: Activate Ingenic overcurrent
+ * detection.
+ * 0 - Deactivate the overcurrent detection
+ * 1 - Activate the overcurrent detection (default)
* @g_dma: Enables gadget dma usage (default: autodetect).
* @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
* @g_rx_fifo_size: The periodic rx fifo size for the device, in
@@ -494,6 +498,7 @@ struct dwc2_core_params {
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool activate_stm_id_vb_detection;
+ bool activate_ingenic_overcurrent_detection;
bool ipg_isoc_en;
u16 max_packet_count;
u32 max_transfer_size;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index eee3504397e6..fe2a58c75861 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4544,7 +4544,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
WARN_ON(hsotg->driver);
- driver->driver.bus = NULL;
hsotg->driver = driver;
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 1306f4ec788d..fdb8a42fff86 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -73,6 +73,47 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
+static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x2000_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 1024;
+ p->host_nperio_tx_fifo_size = 1024;
+ p->host_perio_tx_fifo_size = 1024;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
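All three Ingenic helpers derive activate_ingenic_overcurrent_detection from the absence of a "disable-over-current" boolean in the device tree, so a board that handles over-current externally would simply add that property to its otg node; the property name is taken from the code above, not from a binding document.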
static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -221,7 +262,14 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
- { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+ { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+ { .compatible = "ingenic,jz4775-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,jz4780-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1000-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1600-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1700-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1830-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x2000-otg", .data = dwc2_set_x2000_params },
{ .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params },
{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index c483f28b695d..cd9a734522a7 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -159,4 +159,13 @@ config USB_DWC3_XILINX
This driver handles both ZynqMP and Versal SoC operations.
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_AM62
+ tristate "Texas Instruments AM62 Platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support TI's AM62 platforms with DesignWare Core USB3 IP.
+ The DesignWare Core USB3 IP is programmed to operate in
+ USB 2.0 mode only.
+ Say 'Y' or 'M' here if you have one such device.
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 2d499de6f66a..9f66bd82b639 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -42,6 +42,7 @@ endif
# and allyesconfig builds.
##
+obj-$(CONFIG_USB_DWC3_AM62) += dwc3-am62.o
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d28cd1a6709b..e027c0420dc3 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
@@ -85,7 +86,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* mode. If the controller supports DRD but the dr_mode is not
* specified or set to OTG, then set the mode to peripheral.
*/
- if (mode == USB_DR_MODE_OTG &&
+ if (mode == USB_DR_MODE_OTG && !dwc->edev &&
(!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
!device_property_read_bool(dwc->dev, "usb-role-switch")) &&
!DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -297,6 +298,7 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
udelay(1);
} while (--retries);
+ dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
return -ETIMEDOUT;
done:
@@ -342,7 +344,6 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
* from the default, this will set clock period in DWC3_GUCTL
* register.
* @dwc: Pointer to our controller context structure
- * @ref_clk_per: reference clock period in ns
*/
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
@@ -964,10 +965,8 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
return;
vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
- if (!vals) {
- dev_err(dev, "Error to get memory\n");
+ if (!vals)
return;
- }
/* Get INCR burst type, and parse it */
ret = device_property_read_u32_array(dev,
@@ -1268,40 +1267,36 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
if (IS_ERR(dwc->usb2_phy)) {
ret = PTR_ERR(dwc->usb2_phy);
- if (ret == -ENXIO || ret == -ENODEV) {
+ if (ret == -ENXIO || ret == -ENODEV)
dwc->usb2_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb2 phy configured\n");
- }
}
if (IS_ERR(dwc->usb3_phy)) {
ret = PTR_ERR(dwc->usb3_phy);
- if (ret == -ENXIO || ret == -ENODEV) {
+ if (ret == -ENXIO || ret == -ENODEV)
dwc->usb3_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb3 phy configured\n");
- }
}
dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
if (IS_ERR(dwc->usb2_generic_phy)) {
ret = PTR_ERR(dwc->usb2_generic_phy);
- if (ret == -ENOSYS || ret == -ENODEV) {
+ if (ret == -ENOSYS || ret == -ENODEV)
dwc->usb2_generic_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb2 phy configured\n");
- }
}
dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
if (IS_ERR(dwc->usb3_generic_phy)) {
ret = PTR_ERR(dwc->usb3_generic_phy);
- if (ret == -ENOSYS || ret == -ENODEV) {
+ if (ret == -ENOSYS || ret == -ENODEV)
dwc->usb3_generic_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb3 phy configured\n");
- }
}
return 0;
@@ -1633,6 +1628,51 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
}
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
+ const char *name;
+
+ if (device_property_read_bool(dev, "extcon"))
+ return extcon_get_edev_by_phandle(dev, 0);
+
+ /*
+ * Device tree platforms should get extcon via phandle.
+ * On ACPI platforms, we get the name from a device property.
+ * This device property is for kernel internal use only and
+ * is expected to be set by the glue code.
+ */
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+ edev = extcon_get_extcon_dev(name);
+ if (!edev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return edev;
+ }
+
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
+ np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
+
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
+ of_node_put(np_phy);
+
+ return edev;
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1768,6 +1808,13 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
+ dwc->edev = dwc3_get_extcon(dwc);
+ if (IS_ERR(dwc->edev)) {
+ ret = PTR_ERR(dwc->edev);
+ dev_err_probe(dwc->dev, ret, "failed to get extcon\n");
+ goto err3;
+ }
+
ret = dwc3_get_dr_mode(dwc);
if (ret)
goto err3;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 5c9d467195a6..81c486b3941c 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1046,6 +1046,7 @@ struct dwc3_scratchpad_array {
* @tx_thr_num_pkt_prd: periodic ESS transmit packet count
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
+ * @clear_stall_protocol: endpoint number that requires a delayed status phase
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
* @softconnect: true when gadget connect is called, false when disconnect runs
@@ -1266,6 +1267,7 @@ struct dwc3 {
u8 tx_thr_num_pkt_prd;
u8 tx_max_burst_prd;
u8 tx_fifo_resize_max_num;
+ u8 clear_stall_protocol;
const char *hsphy_interface;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 8cad9e7d3368..039bf241769a 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -8,7 +8,6 @@
*/
#include <linux/extcon.h>
-#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -439,51 +438,6 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
- struct device *dev = dwc->dev;
- struct device_node *np_phy;
- struct extcon_dev *edev = NULL;
- const char *name;
-
- if (device_property_read_bool(dev, "extcon"))
- return extcon_get_edev_by_phandle(dev, 0);
-
- /*
- * Device tree platforms should get extcon via phandle.
- * On ACPI platforms, we get the name from a device property.
- * This device property is for kernel internal use only and
- * is expected to be set by the glue code.
- */
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
- edev = extcon_get_extcon_dev(name);
- if (!edev)
- return ERR_PTR(-EPROBE_DEFER);
-
- return edev;
- }
-
- /*
- * Try to get an extcon device from the USB PHY controller's "port"
- * node. Check if it has the "port" node first, to avoid printing the
- * error message from underlying code, as it's a valid case: extcon
- * device (and "port" node) may be missing in case of "usb-role-switch"
- * or OTG mode.
- */
- np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- if (of_graph_is_present(np_phy)) {
- struct device_node *np_conn;
-
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- of_node_put(np_conn);
- }
- of_node_put(np_phy);
-
- return edev;
-}
-
#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
#define ROLE_SWITCH 1
static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -588,10 +542,6 @@ int dwc3_drd_init(struct dwc3 *dwc)
device_property_read_bool(dwc->dev, "usb-role-switch"))
return dwc3_setup_role_switch(dwc);
- dwc->edev = dwc3_get_extcon(dwc);
- if (IS_ERR(dwc->edev))
- return PTR_ERR(dwc->edev);
-
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
new file mode 100644
index 000000000000..fea7aca35dc8
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+
+/* USB WRAPPER register offsets */
+#define USBSS_PID 0x0
+#define USBSS_OVERCURRENT_CTRL 0x4
+#define USBSS_PHY_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_CORE_STAT 0x14
+#define USBSS_HOST_VBUS_CTRL 0x18
+#define USBSS_MODE_CONTROL 0x1c
+#define USBSS_WAKEUP_CONFIG 0x30
+#define USBSS_WAKEUP_STAT 0x34
+#define USBSS_OVERRIDE_CONFIG 0x38
+#define USBSS_IRQ_MISC_STATUS_RAW 0x430
+#define USBSS_IRQ_MISC_STATUS 0x434
+#define USBSS_IRQ_MISC_ENABLE_SET 0x438
+#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c
+#define USBSS_IRQ_MISC_EOI 0x440
+#define USBSS_INTR_TEST 0x490
+#define USBSS_VBUS_FILTER 0x614
+#define USBSS_VBUS_STAT 0x618
+#define USBSS_DEBUG_CFG 0x708
+#define USBSS_DEBUG_DATA 0x70c
+#define USBSS_HOST_HUB_CTRL 0x714
+
+/* PHY CONFIG register bits */
+#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS_PHY_VBUS_SEL_SHIFT 1
+#define USBSS_PHY_LANE_REVERSE BIT(0)
+
+/* MODE CONTROL register bits */
+#define USBSS_MODE_VALID BIT(0)
+
+/* WAKEUP CONFIG register bits */
+#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3)
+#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2)
+#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1)
+#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0)
+
+/* WAKEUP STAT register bits */
+#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4)
+#define USBSS_WAKEUP_STAT_LINESTATE BIT(3)
+#define USBSS_WAKEUP_STAT_SESSVALID BIT(2)
+#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1)
+#define USBSS_WAKEUP_STAT_CLR BIT(0)
+
+/* IRQ_MISC_STATUS_RAW register bits */
+#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20)
+
+/* IRQ_MISC_STATUS register bits */
+#define USBSS_IRQ_MISC_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_SET register bits */
+#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_CLR register bits */
+#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20)
+
+/* IRQ_MISC_EOI register bits */
+#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0)
+
+/* VBUS_STAT register bits */
+#define USBSS_VBUS_STAT_SESSVALID BIT(2)
+#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
+
+/* Mask for PHY PLL REFCLK */
+#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+
+#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+
+struct dwc3_data {
+ struct device *dev;
+ void __iomem *usbss;
+ struct clk *usb2_refclk;
+ int rate_code;
+ struct regmap *syscon;
+ unsigned int offset;
+ unsigned int vbus_divider;
+};
+
+static const int dwc3_ti_rate_table[] = { /* in KHZ */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+{
+ return readl((data->usbss) + offset);
+}
+
+static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+{
+ writel(value, (data->usbss) + offset);
+}
+
+static int phy_syscon_pll_refclk(struct dwc3_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ data->syscon = syscon;
+
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+ 0, &args);
+ if (ret)
+ return ret;
+
+ data->offset = args.args[0];
+
+ ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+ if (ret) {
+ dev_err(dev, "failed to set phy pll reference clock rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_data *data;
+ int i, ret;
+ unsigned long rate;
+ u32 reg;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ /* Calculate the rate code */
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; // To KHz
+ for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+ if (dwc3_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ data->rate_code = i;
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(data);
+ if (ret)
+ goto err_clk_disable;
+
+ /* VBUS divider select */
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+ if (data->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Don't ignore this device's PM dependencies on its children
+ */
+ pm_suspend_ignore_children(dev, false);
+ clk_prepare_enable(data->usb2_refclk);
+ pm_runtime_get_noresume(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create dwc3 core: %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+err_pm_disable:
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+err_clk_disable:
+ clk_put(data->usb2_refclk);
+ return ret;
+}
+
+static int dwc3_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+static int dwc3_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dwc3_data *data = platform_get_drvdata(pdev);
+ u32 reg;
+
+ device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+
+ /* Clear mode valid bit */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg &= ~USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ pm_runtime_put_sync(dev);
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ clk_put(data->usb2_refclk);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dwc3_ti_suspend_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->usb2_refclk);
+
+ return 0;
+}
+
+static int dwc3_ti_resume_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(data->usb2_refclk);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
+ dwc3_ti_resume_common, NULL);
+
+#define DEV_PM_OPS (&dwc3_ti_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id dwc3_ti_of_match[] = {
+ { .compatible = "ti,am62-usb"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
+
+static struct platform_driver dwc3_ti_driver = {
+ .probe = dwc3_ti_probe,
+ .remove = dwc3_ti_remove,
+ .driver = {
+ .name = "dwc3-am62",
+ .pm = DEV_PM_OPS,
+ .of_match_table = dwc3_ti_of_match,
+ },
+};
+
+module_platform_driver(dwc3_ti_driver);
+
+MODULE_ALIAS("platform:dwc3-am62");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2e19e0e4ea53..ba51de7dd760 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -288,7 +288,7 @@ static void dwc3_pci_resume_work(struct work_struct *work)
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index a6f3a9b38789..67b237c7a76a 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -98,6 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
{
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
+ struct gpio_desc *reset_gpio;
struct phy *usb3_phy;
int ret = 0;
u32 reg;
@@ -201,6 +203,21 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
}
skip_usb3_phy:
+ /* ulpi reset via gpio-modepin or gpio-framework driver */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ if (reset_gpio) {
+ /* Toggle ulpi to reset the phy. */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 1064be5518f6..5d642660fd15 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -218,7 +218,7 @@ out:
return ret;
}
-static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -813,7 +813,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
int ret = -EINVAL;
u32 len;
- if (!dwc->gadget_driver)
+ if (!dwc->gadget_driver || !dwc->connected)
goto out;
trace_dwc3_ctrl_req(ctrl);
@@ -1080,6 +1080,7 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
unsigned int direction = !dwc->ep0_expect_in;
dwc->delayed_status = false;
+ dwc->clear_stall_protocol = 0;
if (dwc->ep0state != EP0_STATUS_PHASE)
return;
@@ -1087,13 +1088,18 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}
-static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!dep->resource_index)
+ /*
+ * For status/DATA OUT stage, TRB will be queued on ep0 out
+ * endpoint for which resource index is zero. Hence allow
+ * queuing ENDXFER command for ep0 out endpoint.
+ */
+ if (!dep->resource_index && dep->number)
return;
cmd = DWC3_DEPCMD_ENDTRANSFER;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0b9c2493844a..00427d108ab9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -657,7 +657,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
/**
* dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
* @dwc: pointer to the DWC3 context
- * @nfifos: number of fifos to calculate for
*
* Calculates the size value based on the equation below:
*
@@ -690,7 +689,7 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
}
/**
- * dwc3_gadget_clear_tx_fifo_size - Clears txfifo allocation
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
* @dwc: pointer to the DWC3 context
*
* Iterates through all the endpoint registers and clears the previous txfifo
@@ -783,7 +782,8 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
num_fifos = 3;
if (dep->endpoint.maxburst > 6 &&
- usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
+ (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
num_fifos = dwc->tx_fifo_resize_max_num;
/* FIFO size for a single buffer */
@@ -882,12 +882,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+
if (usb_endpoint_xfer_control(desc))
goto out;
/* Initialize the TRB ring */
- dep->trb_dequeue = 0;
- dep->trb_enqueue = 0;
memset(dep->trb_pool, 0,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
@@ -2001,10 +2002,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
struct dwc3 *dwc = dep->dwc;
- list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
dwc3_gadget_ep_skip_trbs(dep, req);
switch (req->status) {
case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -2021,6 +2022,12 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
dwc3_gadget_giveback(dep, req, -ECONNRESET);
break;
}
+ /*
+ * The endpoint is disabled, let the dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -2056,16 +2063,6 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
struct dwc3_request *t;
- /*
- * If a Setup packet is received but yet to DMA out, the controller will
- * not process the End Transfer command of any endpoint. Polling of its
- * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
- * timeout. Delay issuing the End Transfer command until the Setup TRB is
- * prepared.
- */
- if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status)
- dep->flags |= DWC3_EP_DELAY_STOP;
-
/* wait until it is processed */
dwc3_stop_active_transfer(dep, true, true);
@@ -2152,6 +2149,9 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
(dep->flags & DWC3_EP_DELAY_STOP)) {
dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
+ if (protocol)
+ dwc->clear_stall_protocol = dep->number;
+
return 0;
}
@@ -2498,28 +2498,64 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
-static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
- unsigned long flags;
- int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->connected = false;
- is_on = !!is_on;
- dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
*/
- if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ int ret;
+
reinit_completion(&dwc->ep0_in_setup);
+ spin_unlock_irqrestore(&dwc->lock, flags);
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ spin_lock_irqsave(&dwc->lock, flags);
if (ret == 0)
dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
}
/*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ __dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /*
+ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ * driver needs to acknowledge them before the controller can halt.
+ * Simply let the interrupt handler acknowledge and handle the
+ * remaining events generated by the controller while polling for
+ * DSTS.DEVCTLHLT.
+ */
+ return dwc3_gadget_run_stop(dwc, false, false);
+}
+
+static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ int ret;
+
+ is_on = !!is_on;
+
+ if (dwc->pullups_connected == is_on)
+ return 0;
+
+ dwc->softconnect = is_on;
+
+ /*
* Avoid issuing a runtime resume if the device is already in the
* suspended state during gadget disconnect. DWC3 gadget was already
* halted/stopped during runtime suspend.
@@ -2541,42 +2577,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
- /*
- * Synchronize and disable any further event handling while controller
- * is being enabled/disabled.
- */
- disable_irq(dwc->irq_gadget);
-
- spin_lock_irqsave(&dwc->lock, flags);
-
if (!is_on) {
- u32 count;
-
- dwc->connected = false;
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 4.1.8 Table 4-7, it states that for a device-initiated
- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
- * command for any active transfers" before clearing the RunStop
- * bit.
- */
- dwc3_stop_active_transfers(dwc);
- __dwc3_gadget_stop(dwc);
-
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
- * "software needs to acknowledge the events that are generated
- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
- * to be set to '1'."
- */
- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
- count &= DWC3_GEVNTCOUNT_MASK;
- if (count > 0) {
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
- dwc->ev_buf->length;
- }
+ ret = dwc3_gadget_soft_disconnect(dwc);
} else {
/*
* In the Synopsys DWC_usb31 1.90a programming guide section
@@ -2584,18 +2586,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
* device-initiated disconnect requires a core soft reset
* (DCTL.CSftRst) before enabling the run/stop bit.
*/
- spin_unlock_irqrestore(&dwc->lock, flags);
dwc3_core_soft_reset(dwc);
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
+ ret = dwc3_gadget_run_stop(dwc, true, false);
}
- ret = dwc3_gadget_run_stop(dwc, is_on, false);
- spin_unlock_irqrestore(&dwc->lock, flags);
- enable_irq(dwc->irq_gadget);
-
pm_runtime_put(dwc->dev);
return ret;
@@ -2745,6 +2742,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->ep0_bounced = false;
dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc->delayed_status = false;
dwc3_ep0_out_start(dwc);
@@ -3333,15 +3331,21 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
- list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
+ while (!list_empty(&dep->started_list)) {
int ret;
+ req = next_request(&dep->started_list);
ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
req, status);
if (ret)
break;
+ /*
+ * The endpoint is disabled, let the dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -3380,14 +3384,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
- if (!dep->endpoint.desc)
- return no_started_trb;
-
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
goto out;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list) &&
(list_empty(&dep->pending_list) || status == -EXDEV))
@@ -3512,7 +3516,7 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
}
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
- if (dwc->delayed_status)
+ if (dwc->clear_stall_protocol == dep->number)
dwc3_ep0_send_delayed_status(dwc);
}
@@ -3673,12 +3677,35 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
+ struct dwc3 *dwc = dep->dwc;
+
+ /*
+ * Only issue End Transfer command to the control endpoint of a started
+ * Data Phase. Typically we should only do so in error cases such as
+ * invalid/unexpected direction as described in the control transfer
+ * flow of the programming guide.
+ */
+ if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
+ return;
+
if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
(dep->flags & DWC3_EP_DELAY_STOP) ||
(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
+ * If a Setup packet is received but yet to DMA out, the controller will
+ * not process the End Transfer command of any endpoint. Polling of its
+ * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
+ * timeout. Delay issuing the End Transfer command until the Setup TRB is
+ * prepared.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+ dep->flags |= DWC3_EP_DELAY_STOP;
+ return;
+ }
+
+ /*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
* EndTransfer Command Completion IRQ, but that's causing too
@@ -3795,6 +3822,27 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
}
dwc3_reset_gadget(dwc);
+
+ /*
+ * From SNPS databook section 8.1.2, the EP0 should be in setup
+ * phase. So ensure that EP0 is in setup phase by issuing a stall
+ * and restart if EP0 is not in setup phase.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ unsigned int dir;
+
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
+
+ dwc3_ep0_stall_and_restart(dwc);
+ }
+
/*
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
* Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index f763380e672e..55a56cf67d73 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -110,6 +110,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index eda871973d6c..f56c30cf151e 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -7,7 +7,6 @@
* Authors: Felipe Balbi <balbi@ti.com>,
*/
-#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -83,7 +82,6 @@ int dwc3_host_init(struct dwc3 *dwc)
}
xhci->dev.parent = dwc->dev;
- ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev));
dwc->xhci = xhci;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2eaeaae96759..403563c06477 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2505,7 +2505,7 @@ int usb_composite_probe(struct usb_composite_driver *driver)
gadget_driver->driver.name = driver->name;
gadget_driver->max_speed = driver->max_speed;
- return usb_gadget_probe_driver(gadget_driver);
+ return usb_gadget_register_driver(gadget_driver);
}
EXPORT_SYMBOL_GPL(usb_composite_probe);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 84b73cb03f87..3a6b4926193e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -284,7 +284,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
goto err;
}
gi->composite.gadget_driver.udc_name = name;
- ret = usb_gadget_probe_driver(&gi->composite.gadget_driver);
+ ret = usb_gadget_register_driver(&gi->composite.gadget_driver);
if (ret) {
gi->composite.gadget_driver.udc_name = NULL;
goto err;
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 349945e064bb..411eb489e0ff 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -333,6 +333,8 @@ static void acm_complete_set_line_coding(struct usb_ep *ep,
}
}
+static int acm_send_break(struct gserial *port, int duration);
+
static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct f_acm *acm = func_to_acm(f);
@@ -391,6 +393,14 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
acm->port_handshake_bits = w_value;
break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SEND_BREAK:
+ if (w_index != acm->ctrl_id)
+ goto invalid;
+
+ acm_send_break(&acm->port, w_value);
+ break;
+
default:
invalid:
dev_vdbg(&cdev->gadget->dev,
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d37965867b23..d3feeeb50841 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -24,7 +24,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
-#include "u_uvc.h"
#include "uvc.h"
#include "uvc_configfs.h"
#include "uvc_v4l2.h"
@@ -44,7 +43,7 @@ MODULE_PARM_DESC(trace, "Trace level bitmask");
#define UVC_STRING_STREAMING_IDX 1
static struct usb_string uvc_en_us_strings[] = {
- [UVC_STRING_CONTROL_IDX].s = "UVC Camera",
+ /* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */
[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
{ }
};
@@ -676,6 +675,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
us = usb_gstrings_attach(cdev, uvc_function_strings,
ARRAY_SIZE(uvc_en_us_strings));
if (IS_ERR(us)) {
@@ -866,6 +866,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
+ snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");
ret = uvcg_attach_configfs(opts);
if (ret < 0) {
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 2bb569895a90..c1f62e91b012 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1179,8 +1179,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
if (c_chmask) {
struct uac_rtd_params *prm = &uac->c_prm;
- spin_lock_init(&prm->lock);
- uac->c_prm.uac = uac;
+ spin_lock_init(&prm->lock);
+ uac->c_prm.uac = uac;
prm->max_psize = g_audio->out_ep_maxpsize;
prm->srate = params->c_srates[0];
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 9a01a7d4f17f..24b8681b0d6f 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -27,6 +27,7 @@ struct f_uvc_opts {
unsigned int control_interface;
unsigned int streaming_interface;
+ char function_name[32];
/*
* Control descriptors array pointers for full-/high-speed and
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 886103a1fe9b..58e383afdd44 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -80,6 +80,7 @@ struct uvc_request {
struct uvc_video *video;
struct sg_table sgt;
u8 header[UVCG_REQUEST_HEADER_LEN];
+ struct uvc_buffer *last_buf;
};
struct uvc_video {
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 77d64031aa9c..e5a6b6e36b3d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -10,17 +10,14 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#include <linux/sort.h>
-
-#include "u_uvc.h"
#include "uvc_configfs.h"
+#include <linux/sort.h>
+
/* -----------------------------------------------------------------------------
* Global Utility Structures and Macros
*/
-#define UVCG_STREAMING_CONTROL_SIZE 1
-
#define UVC_ATTR(prefix, cname, aname) \
static struct configfs_attribute prefix##attr_##cname = { \
.ca_name = __stringify(aname), \
@@ -49,12 +46,6 @@ static int uvcg_config_compare_u32(const void *l, const void *r)
return li < ri ? -1 : li == ri ? 0 : 1;
}
-static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
-{
- return container_of(to_config_group(item), struct f_uvc_opts,
- func_inst.group);
-}
-
struct uvcg_config_group_type {
struct config_item_type type;
const char *name;
@@ -125,19 +116,6 @@ static void uvcg_config_remove_children(struct config_group *group)
* control/header
*/
-DECLARE_UVC_HEADER_DESCRIPTOR(1);
-
-struct uvcg_control_header {
- struct config_item item;
- struct UVC_HEADER_DESCRIPTOR(1) desc;
- unsigned linked;
-};
-
-static struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
-{
- return container_of(item, struct uvcg_control_header, item);
-}
-
#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \
static ssize_t uvcg_control_header_##cname##_show( \
struct config_item *item, char *page) \
@@ -769,24 +747,6 @@ static const char * const uvcg_format_names[] = {
"mjpeg",
};
-enum uvcg_format_type {
- UVCG_UNCOMPRESSED = 0,
- UVCG_MJPEG,
-};
-
-struct uvcg_format {
- struct config_group group;
- enum uvcg_format_type type;
- unsigned linked;
- unsigned num_frames;
- __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
-};
-
-static struct uvcg_format *to_uvcg_format(struct config_item *item)
-{
- return container_of(to_config_group(item), struct uvcg_format, group);
-}
-
static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
{
struct f_uvc_opts *opts;
@@ -845,29 +805,11 @@ end:
return ret;
}
-struct uvcg_format_ptr {
- struct uvcg_format *fmt;
- struct list_head entry;
-};
-
/* -----------------------------------------------------------------------------
* streaming/header/<NAME>
* streaming/header
*/
-struct uvcg_streaming_header {
- struct config_item item;
- struct uvc_input_header_descriptor desc;
- unsigned linked;
- struct list_head formats;
- unsigned num_fmt;
-};
-
-static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
-{
- return container_of(item, struct uvcg_streaming_header, item);
-}
-
static void uvcg_format_set_indices(struct config_group *fmt);
static int uvcg_streaming_header_allow_link(struct config_item *src,
@@ -1059,31 +1001,6 @@ static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
* streaming/<mode>/<format>/<NAME>
*/
-struct uvcg_frame {
- struct config_item item;
- enum uvcg_format_type fmt_type;
- struct {
- u8 b_length;
- u8 b_descriptor_type;
- u8 b_descriptor_subtype;
- u8 b_frame_index;
- u8 bm_capabilities;
- u16 w_width;
- u16 w_height;
- u32 dw_min_bit_rate;
- u32 dw_max_bit_rate;
- u32 dw_max_video_frame_buffer_size;
- u32 dw_default_frame_interval;
- u8 b_frame_interval_type;
- } __attribute__((packed)) frame;
- u32 *dw_frame_interval;
-};
-
-static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
-{
- return container_of(item, struct uvcg_frame, item);
-}
-
#define UVCG_FRAME_ATTR(cname, aname, bits) \
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{ \
@@ -1345,6 +1262,7 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
+ struct uvcg_frame_ptr *frame_ptr;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -1375,6 +1293,16 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
kfree(h);
return ERR_PTR(-EINVAL);
}
+
+ frame_ptr = kzalloc(sizeof(*frame_ptr), GFP_KERNEL);
+ if (!frame_ptr) {
+ mutex_unlock(&opts->lock);
+ kfree(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ frame_ptr->frm = h;
+ list_add_tail(&frame_ptr->entry, &fmt->frames);
++fmt->num_frames;
mutex_unlock(&opts->lock);
@@ -1388,13 +1316,23 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
+ struct uvcg_frame *target_frm = NULL;
+ struct uvcg_frame_ptr *frame_ptr, *tmp;
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
mutex_lock(&opts->lock);
+ target_frm = container_of(item, struct uvcg_frame, item);
fmt = to_uvcg_format(&group->cg_item);
- --fmt->num_frames;
+
+ list_for_each_entry_safe(frame_ptr, tmp, &fmt->frames, entry)
+ if (frame_ptr->frm == target_frm) {
+ list_del(&frame_ptr->entry);
+ kfree(frame_ptr);
+ --fmt->num_frames;
+ break;
+ }
mutex_unlock(&opts->lock);
config_item_put(item);
@@ -1420,18 +1358,6 @@ static void uvcg_format_set_indices(struct config_group *fmt)
* streaming/uncompressed/<NAME>
*/
-struct uvcg_uncompressed {
- struct uvcg_format fmt;
- struct uvc_format_uncompressed desc;
-};
-
-static struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
-{
- return container_of(
- container_of(to_config_group(item), struct uvcg_format, group),
- struct uvcg_uncompressed, fmt);
-}
-
static struct configfs_group_operations uvcg_uncompressed_group_ops = {
.make_item = uvcg_frame_make,
.drop_item = uvcg_frame_drop,
@@ -1565,6 +1491,12 @@ uvcg_uncompressed_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
+ /* index values in uvc are never 0 */ \
+ if (!num) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ \
u->desc.aname = num; \
ret = len; \
end: \
@@ -1645,6 +1577,7 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
h->desc.bmInterfaceFlags = 0;
h->desc.bCopyProtect = 0;
+ INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_UNCOMPRESSED;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_uncompressed_type);
@@ -1669,18 +1602,6 @@ static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
* streaming/mjpeg/<NAME>
*/
-struct uvcg_mjpeg {
- struct uvcg_format fmt;
- struct uvc_format_mjpeg desc;
-};
-
-static struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
-{
- return container_of(
- container_of(to_config_group(item), struct uvcg_format, group),
- struct uvcg_mjpeg, fmt);
-}
-
static struct configfs_group_operations uvcg_mjpeg_group_ops = {
.make_item = uvcg_frame_make,
.drop_item = uvcg_frame_drop,
@@ -1758,6 +1679,12 @@ uvcg_mjpeg_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
+ /* index values in uvc are never 0 */ \
+ if (!num) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ \
u->desc.aname = num; \
ret = len; \
end: \
@@ -1831,6 +1758,7 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
h->desc.bmInterfaceFlags = 0;
h->desc.bCopyProtect = 0;
+ INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_MJPEG;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_mjpeg_type);
@@ -2425,10 +2353,51 @@ UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
#undef UVCG_OPTS_ATTR
+#define UVCG_OPTS_STRING_ATTR(cname, aname) \
+static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
+ char *page) \
+{ \
+ struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
+ const char *page, size_t len) \
+{ \
+ struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int ret = 0; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \
+ "%s", page); \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+UVC_ATTR(f_uvc_opts_string_, cname, aname)
+
+UVCG_OPTS_STRING_ATTR(function_name, function_name);
+
+#undef UVCG_OPTS_STRING_ATTR
+
static struct configfs_attribute *uvc_attrs[] = {
&f_uvc_opts_attr_streaming_interval,
&f_uvc_opts_attr_streaming_maxpacket,
&f_uvc_opts_attr_streaming_maxburst,
+ &f_uvc_opts_string_attr_function_name,
NULL,
};
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 7e1d7ca29bf2..ad2ec8c4c78c 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -12,7 +12,125 @@
#ifndef UVC_CONFIGFS_H
#define UVC_CONFIGFS_H
-struct f_uvc_opts;
+#include <linux/configfs.h>
+
+#include "u_uvc.h"
+
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uvc_opts,
+ func_inst.group);
+}
+
+#define UVCG_STREAMING_CONTROL_SIZE 1
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+struct uvcg_control_header {
+ struct config_item item;
+ struct UVC_HEADER_DESCRIPTOR(1) desc;
+ unsigned linked;
+};
+
+static inline struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_control_header, item);
+}
+
+enum uvcg_format_type {
+ UVCG_UNCOMPRESSED = 0,
+ UVCG_MJPEG,
+};
+
+struct uvcg_format {
+ struct config_group group;
+ enum uvcg_format_type type;
+ unsigned linked;
+ struct list_head frames;
+ unsigned num_frames;
+ __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+};
+
+struct uvcg_format_ptr {
+ struct uvcg_format *fmt;
+ struct list_head entry;
+};
+
+static inline struct uvcg_format *to_uvcg_format(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct uvcg_format, group);
+}
+
+struct uvcg_streaming_header {
+ struct config_item item;
+ struct uvc_input_header_descriptor desc;
+ unsigned linked;
+ struct list_head formats;
+ unsigned num_fmt;
+};
+
+static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_streaming_header, item);
+}
+
+struct uvcg_frame_ptr {
+ struct uvcg_frame *frm;
+ struct list_head entry;
+};
+
+struct uvcg_frame {
+ struct config_item item;
+ enum uvcg_format_type fmt_type;
+ struct {
+ u8 b_length;
+ u8 b_descriptor_type;
+ u8 b_descriptor_subtype;
+ u8 b_frame_index;
+ u8 bm_capabilities;
+ u16 w_width;
+ u16 w_height;
+ u32 dw_min_bit_rate;
+ u32 dw_max_bit_rate;
+ u32 dw_max_video_frame_buffer_size;
+ u32 dw_default_frame_interval;
+ u8 b_frame_interval_type;
+ } __attribute__((packed)) frame;
+ u32 *dw_frame_interval;
+};
+
+static inline struct uvcg_frame *to_uvcg_frame(struct config_item *item)
+{
+ return container_of(item, struct uvcg_frame, item);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
+struct uvcg_uncompressed {
+ struct uvcg_format fmt;
+ struct uvc_format_uncompressed desc;
+};
+
+static inline struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
+{
+ return container_of(to_uvcg_format(item), struct uvcg_uncompressed, fmt);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
+struct uvcg_mjpeg {
+ struct uvcg_format fmt;
+ struct uvc_format_mjpeg desc;
+};
+
+static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
+{
+ return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt);
+}
int uvcg_attach_configfs(struct f_uvc_opts *opts);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 2cda982f3765..d25edc3d2174 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -185,18 +185,7 @@ int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
- unsigned long flags;
- int ret;
-
- ret = vb2_qbuf(&queue->queue, NULL, buf);
- if (ret < 0)
- return ret;
-
- spin_lock_irqsave(&queue->irqlock, flags);
- ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
- queue->flags &= ~UVC_QUEUE_PAUSED;
- spin_unlock_irqrestore(&queue->irqlock, flags);
- return ret;
+ return vb2_qbuf(&queue->queue, NULL, buf);
}
/*
@@ -328,33 +317,22 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
}
/* called with &queue_irqlock held.. */
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf)
{
- struct uvc_buffer *nextbuf;
-
if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
buf->length != buf->bytesused) {
buf->state = UVC_BUF_STATE_QUEUED;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
- return buf;
+ return;
}
- list_del(&buf->queue);
- if (!list_empty(&queue->irqqueue))
- nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
- queue);
- else
- nextbuf = NULL;
-
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = queue->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
-
- return nextbuf;
}
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
@@ -364,8 +342,6 @@ struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
if (!list_empty(&queue->irqqueue))
buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
- else
- queue->flags |= UVC_QUEUE_PAUSED;
return buf;
}
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 05360a0767f6..41f87b917f6b 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -43,7 +43,6 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_INCOMPLETE (1 << 1)
-#define UVC_QUEUE_PAUSED (1 << 2)
struct uvc_video_queue {
struct vb2_queue queue;
@@ -93,7 +92,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 7f59a0c47402..a9bb4553db84 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -112,7 +112,8 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
video->fid ^= UVC_STREAM_FID;
video->payload_size = 0;
@@ -154,7 +155,7 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
sg = sg_next(sg);
for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
- if (!len || !buf->sg)
+ if (!len || !buf->sg || !sg_dma_len(buf->sg))
break;
sg_left = sg_dma_len(buf->sg) - buf->offset;
@@ -183,8 +184,9 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
buf->offset = 0;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
video->fid ^= UVC_STREAM_FID;
+ ureq->last_buf = buf;
}
}
@@ -210,7 +212,8 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
video->fid ^= UVC_STREAM_FID;
}
}
@@ -264,6 +267,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
uvcg_queue_cancel(queue, 0);
}
+ if (ureq->last_buf) {
+ uvcg_complete_buffer(&video->queue, ureq->last_buf);
+ ureq->last_buf = NULL;
+ }
+
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
@@ -332,6 +340,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
video->ureq[i].req->complete = uvc_video_complete;
video->ureq[i].req->context = &video->ureq[i];
video->ureq[i].video = video;
+ video->ureq[i].last_buf = NULL;
list_add_tail(&video->ureq[i].req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 6bcbad382580..b62e45235e8e 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -422,7 +422,7 @@ static struct usb_gadget_driver dbgp_driver = {
static int __init dbgp_init(void)
{
- return usb_gadget_probe_driver(&dbgp_driver);
+ return usb_gadget_register_driver(&dbgp_driver);
}
static void __exit dbgp_exit(void)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 0c01e749f9ea..79990597c39f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1873,7 +1873,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
else
gadgetfs_driver.max_speed = USB_SPEED_FULL;
- value = usb_gadget_probe_driver(&gadgetfs_driver);
+ value = usb_gadget_register_driver(&gadgetfs_driver);
if (value != 0) {
spin_lock_irq(&dev->lock);
goto fail;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index e9440f7bf019..241740024c50 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -512,12 +512,12 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
- ret = usb_gadget_probe_driver(&dev->driver);
+ ret = usb_gadget_register_driver(&dev->driver);
spin_lock_irqsave(&dev->lock, flags);
if (ret) {
dev_err(dev->dev,
- "fail, usb_gadget_probe_driver returned %d\n", ret);
+ "fail, usb_gadget_register_driver returned %d\n", ret);
dev->state = STATE_DEV_FAILED;
goto out_unlock;
}
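The rename from usb_gadget_probe_driver() to usb_gadget_register_driver() in the hunks above is mechanical; callers keep the same pairing with usb_gadget_unregister_driver(). A hedged sketch of a legacy-style caller after this series (my_gadget_driver is a placeholder for an existing struct usb_gadget_driver, not something defined by this patch):

/* Illustrative only: module init/exit for a legacy gadget driver
 * using the renamed registration entry point.
 */
static int __init my_gadget_init(void)
{
	return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);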
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 85b194011a16..7886497253cc 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/idr.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task_stack.h>
@@ -23,6 +24,10 @@
#include "trace.h"
+static DEFINE_IDA(gadget_id_numbers);
+
+static struct bus_type gadget_bus_type;
+
/**
* struct usb_udc - describes one usb device controller
* @driver: the gadget driver pointer. For use by the class code
@@ -47,11 +52,9 @@ struct usb_udc {
static struct class *udc_class;
static LIST_HEAD(udc_list);
-static LIST_HEAD(gadget_driver_pending_list);
-static DEFINE_MUTEX(udc_lock);
-static int udc_bind_to_driver(struct usb_udc *udc,
- struct usb_gadget_driver *driver);
+/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
+static DEFINE_MUTEX(udc_lock);
/* ------------------------------------------------------------------------- */
@@ -1238,38 +1241,16 @@ static void usb_udc_nop_release(struct device *dev)
dev_vdbg(dev, "%s\n", __func__);
}
-/* should be called with udc_lock held */
-static int check_pending_gadget_drivers(struct usb_udc *udc)
-{
- struct usb_gadget_driver *driver;
- int ret = 0;
-
- list_for_each_entry(driver, &gadget_driver_pending_list, pending)
- if (!driver->udc_name || strcmp(driver->udc_name,
- dev_name(&udc->dev)) == 0) {
- ret = udc_bind_to_driver(udc, driver);
- if (ret != -EPROBE_DEFER)
- list_del_init(&driver->pending);
- break;
- }
-
- return ret;
-}
-
/**
* usb_initialize_gadget - initialize a gadget and its embedded struct device
* @parent: the parent device to this udc. Usually the controller driver's
* device.
* @gadget: the gadget to be initialized.
* @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- * Calls the gadget release function in the latter case.
*/
void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
void (*release)(struct device *dev))
{
- dev_set_name(&gadget->dev, "gadget");
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
@@ -1279,6 +1260,7 @@ void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
gadget->dev.release = usb_udc_nop_release;
device_initialize(&gadget->dev);
+ gadget->dev.bus = &gadget_bus_type;
}
EXPORT_SYMBOL_GPL(usb_initialize_gadget);
@@ -1308,10 +1290,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
if (ret)
goto err_put_udc;
- ret = device_add(&gadget->dev);
- if (ret)
- goto err_put_udc;
-
udc->gadget = gadget;
gadget->udc = udc;
@@ -1319,6 +1297,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
mutex_lock(&udc_lock);
list_add_tail(&udc->list, &udc_list);
+ mutex_unlock(&udc_lock);
ret = device_add(&udc->dev);
if (ret)
@@ -1327,25 +1306,30 @@ int usb_add_gadget(struct usb_gadget *gadget)
usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
udc->vbus = true;
- /* pick up one of pending gadget drivers */
- ret = check_pending_gadget_drivers(udc);
- if (ret)
+ ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL);
+ if (ret < 0)
goto err_del_udc;
+ gadget->id_number = ret;
+ dev_set_name(&gadget->dev, "gadget.%d", ret);
- mutex_unlock(&udc_lock);
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_free_id;
return 0;
+ err_free_id:
+ ida_free(&gadget_id_numbers, gadget->id_number);
+
err_del_udc:
flush_work(&gadget->work);
device_del(&udc->dev);
err_unlist_udc:
+ mutex_lock(&udc_lock);
list_del(&udc->list);
mutex_unlock(&udc_lock);
- device_del(&gadget->dev);
-
err_put_udc:
put_device(&udc->dev);
@@ -1421,30 +1405,11 @@ int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
-static void usb_gadget_remove_driver(struct usb_udc *udc)
-{
- dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
- udc->driver->function);
-
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
- usb_gadget_disconnect(udc->gadget);
- usb_gadget_disable_async_callbacks(udc);
- if (udc->gadget->irq)
- synchronize_irq(udc->gadget->irq);
- udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc);
-
- udc->driver = NULL;
- udc->gadget->dev.driver = NULL;
-}
-
/**
- * usb_del_gadget - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget - deletes a gadget and unregisters its udc
+ * @gadget: the gadget to be deleted.
*
- * This will call usb_gadget_unregister_driver() if
- * the @udc is still busy.
+ * This will unbind @gadget, if it is bound.
* It will not do a final usb_put_gadget().
*/
void usb_del_gadget(struct usb_gadget *gadget)
@@ -1458,25 +1423,19 @@ void usb_del_gadget(struct usb_gadget *gadget)
mutex_lock(&udc_lock);
list_del(&udc->list);
-
- if (udc->driver) {
- struct usb_gadget_driver *driver = udc->driver;
-
- usb_gadget_remove_driver(udc);
- list_add(&driver->pending, &gadget_driver_pending_list);
- }
mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
flush_work(&gadget->work);
- device_unregister(&udc->dev);
device_del(&gadget->dev);
+ ida_free(&gadget_id_numbers, gadget->id_number);
+ device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);
/**
- * usb_del_gadget_udc - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget_udc - unregisters a gadget
+ * @gadget: the gadget to be unregistered.
*
* Calls usb_del_gadget() and does a final usb_put_gadget().
*/
@@ -1489,123 +1448,147 @@ EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
/* ------------------------------------------------------------------------- */
-static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+static int gadget_match_driver(struct device *dev, struct device_driver *drv)
{
- int ret;
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(drv,
+ struct usb_gadget_driver, driver);
- dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
- driver->function);
+ /* If the driver specifies a udc_name, it must match the UDC's name */
+ if (driver->udc_name &&
+ strcmp(driver->udc_name, dev_name(&udc->dev)) != 0)
+ return 0;
+
+ /* If the driver is already bound to a gadget, it doesn't match */
+ if (driver->is_bound)
+ return 0;
+
+ /* Otherwise any gadget driver matches any UDC */
+ return 1;
+}
+static int gadget_bind_driver(struct device *dev)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(dev->driver,
+ struct usb_gadget_driver, driver);
+ int ret = 0;
+
+ mutex_lock(&udc_lock);
+ if (driver->is_bound) {
+ mutex_unlock(&udc_lock);
+ return -ENXIO; /* Driver binds to only one gadget */
+ }
+ driver->is_bound = true;
udc->driver = driver;
- udc->gadget->dev.driver = &driver->driver;
+ mutex_unlock(&udc_lock);
+
+ dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function);
usb_gadget_udc_set_speed(udc, driver->max_speed);
+ mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
- goto err1;
+ goto err_bind;
+
ret = usb_gadget_udc_start(udc);
- if (ret) {
- driver->unbind(udc->gadget);
- goto err1;
- }
+ if (ret)
+ goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
+ mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
-err1:
+
+ err_start:
+ driver->unbind(udc->gadget);
+
+ err_bind:
if (ret != -EISNAM)
dev_err(&udc->dev, "failed to start %s: %d\n",
- udc->driver->function, ret);
+ driver->function, ret);
+
udc->driver = NULL;
- udc->gadget->dev.driver = NULL;
+ driver->is_bound = false;
+ mutex_unlock(&udc_lock);
+
return ret;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+static void gadget_unbind_driver(struct device *dev)
{
- struct usb_udc *udc = NULL, *iter;
- int ret = -ENODEV;
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = udc->driver;
+
+ dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+ mutex_lock(&udc_lock);
+ usb_gadget_disconnect(gadget);
+ usb_gadget_disable_async_callbacks(udc);
+ if (gadget->irq)
+ synchronize_irq(gadget->irq);
+ udc->driver->unbind(gadget);
+ usb_gadget_udc_stop(udc);
+
+ driver->is_bound = false;
+ udc->driver = NULL;
+ mutex_unlock(&udc_lock);
+}
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
if (!driver || !driver->bind || !driver->setup)
return -EINVAL;
+ driver->driver.bus = &gadget_bus_type;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+ ret = driver_register(&driver->driver);
+ if (ret) {
+ pr_warn("%s: driver registration failed: %d\n",
+ driver->function, ret);
+ return ret;
+ }
+
mutex_lock(&udc_lock);
- if (driver->udc_name) {
- list_for_each_entry(iter, &udc_list, list) {
- ret = strcmp(driver->udc_name, dev_name(&iter->dev));
- if (ret)
- continue;
- udc = iter;
- break;
- }
- if (ret)
- ret = -ENODEV;
- else if (udc->driver)
+ if (!driver->is_bound) {
+ if (driver->match_existing_only) {
+ pr_warn("%s: couldn't find an available UDC or it's busy\n",
+ driver->function);
ret = -EBUSY;
- else
- goto found;
- } else {
- list_for_each_entry(iter, &udc_list, list) {
- /* For now we take the first one */
- if (iter->driver)
- continue;
- udc = iter;
- goto found;
+ } else {
+ pr_info("%s: couldn't find an available UDC\n",
+ driver->function);
+ ret = 0;
}
}
-
- if (!driver->match_existing_only) {
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
- ret = 0;
- }
-
mutex_unlock(&udc_lock);
+
if (ret)
- pr_warn("couldn't find an available UDC or it's busy: %d\n", ret);
- return ret;
-found:
- ret = udc_bind_to_driver(udc, driver);
- mutex_unlock(&udc_lock);
+ driver_unregister(&driver->driver);
return ret;
}
-EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
- struct usb_udc *udc = NULL;
- int ret = -ENODEV;
-
if (!driver || !driver->unbind)
return -EINVAL;
- mutex_lock(&udc_lock);
- list_for_each_entry(udc, &udc_list, list) {
- if (udc->driver == driver) {
- usb_gadget_remove_driver(udc);
- usb_gadget_set_state(udc->gadget,
- USB_STATE_NOTATTACHED);
-
- /* Maybe there is someone waiting for this UDC? */
- check_pending_gadget_drivers(udc);
- /*
- * For now we ignore bind errors as probably it's
- * not a valid reason to fail other's gadget unbind
- */
- ret = 0;
- break;
- }
- }
-
- if (ret) {
- list_del(&driver->pending);
- ret = 0;
- }
- mutex_unlock(&udc_lock);
- return ret;
+ driver_unregister(&driver->driver);
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
@@ -1757,8 +1740,17 @@ static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static struct bus_type gadget_bus_type = {
+ .name = "gadget",
+ .probe = gadget_bind_driver,
+ .remove = gadget_unbind_driver,
+ .match = gadget_match_driver,
+};
+
static int __init usb_udc_init(void)
{
+ int rc;
+
udc_class = class_create(THIS_MODULE, "udc");
if (IS_ERR(udc_class)) {
pr_err("failed to create udc class --> %ld\n",
@@ -1767,12 +1759,17 @@ static int __init usb_udc_init(void)
}
udc_class->dev_uevent = usb_udc_uevent;
- return 0;
+
+ rc = bus_register(&gadget_bus_type);
+ if (rc)
+ class_destroy(udc_class);
+ return rc;
}
subsys_initcall(usb_udc_init);
static void __exit usb_udc_exit(void)
{
+ bus_unregister(&gadget_bus_type);
class_destroy(udc_class);
}
module_exit(usb_udc_exit);
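With the udc/core.c rework above, gadget drivers ride the driver model: gadget_match_driver() rejects a UDC whose name differs from an explicit udc_name and rejects drivers that are already bound, gadget_bind_driver()/gadget_unbind_driver() take over from udc_bind_to_driver()/usb_gadget_remove_driver(), and match_existing_only turns a missed match into -EBUSY instead of leaving the driver registered and waiting for a UDC to appear. A hedged sketch of a driver as the new gadget bus sees it; the my_* callbacks and the UDC name are placeholders:

	/* Sketch only: field names are those of struct usb_gadget_driver. */
	static struct usb_gadget_driver my_driver = {
		.function            = "my_function",
		.max_speed           = USB_SPEED_SUPER,
		.bind                = my_bind,      /* invoked from gadget_bind_driver()   */
		.unbind              = my_unbind,    /* invoked from gadget_unbind_driver() */
		.setup               = my_setup,
		.udc_name            = "my-udc.0",   /* optional: accept only this UDC      */
		.match_existing_only = 1,            /* fail instead of waiting for a UDC   */
		.driver = {
			.name = "my_function",
		},
	};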
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 6a8884632273..c97cd4bc817c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -71,7 +71,7 @@ static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);
/*
- * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
* mode 0 == Slow DREQ mode
* mode 1 == Fast DREQ mode
* mode 2 == Burst mode
@@ -97,7 +97,7 @@ module_param(fifo_mode, ushort, 0644);
/*
* enable_suspend: When enabled, the driver will respond to
* USB suspend requests by powering down the NET2272. Otherwise,
- * USB suspend requests will be ignored. This is acceptible for
+ * USB suspend requests will be ignored. This is acceptable for
* self-powered devices. For bus powered devices set this to 1.
*/
static ushort enable_suspend = 0;
@@ -288,7 +288,7 @@ static void net2272_ep_reset(struct net2272_ep *ep)
| (1 << LOCAL_OUT_ZLP)
| (1 << BUFFER_FLUSH));
- /* fifo size is handled seperately */
+ /* fifo size is handled separately */
}
static int net2272_disable(struct usb_ep *_ep)
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 051d024b369e..d6a68631354a 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -932,19 +932,11 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
- struct net2280_dma *end;
- dma_addr_t tmp;
-
/* swap new dummy for old, link; fill and maybe activate */
- end = ep->dummy;
- ep->dummy = req->td;
- req->td = end;
-
- tmp = ep->td_dma;
- ep->td_dma = req->td_dma;
- req->td_dma = tmp;
+ swap(ep->dummy, req->td);
+ swap(ep->td_dma, req->td_dma);
- end->dmadesc = cpu_to_le32 (ep->td_dma);
+ req->td->dmadesc = cpu_to_le32 (ep->td_dma);
fill_dma_desc(ep, req, valid);
}
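The open-coded pointer and dma_addr_t exchanges above collapse into the kernel's swap() helper, which roughly expands to a typeof()-typed temporary; a trivial sketch:

	/* swap() exchanges two lvalues of the same type. */
	static void swap_example(void)
	{
		int a = 1, b = 2;

		swap(a, b);	/* afterwards a == 2 and b == 1 */
	}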
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 5096d24915ce..61cabb9de6ae 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1470,7 +1470,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
if (!udc->ep0_in) {
stat = 0;
/* read next OUT packet of request, maybe
- * reactiviting the fifo; stall on errors.
+ * reactivating the fifo; stall on errors.
*/
stat = read_fifo(ep0, req);
if (!req || stat < 0) {
@@ -2609,6 +2609,8 @@ static void omap_udc_release(struct device *dev)
if (udc->dc_clk) {
if (udc->clk_requested)
omap_udc_enable_clock(0);
+ clk_unprepare(udc->hhc_clk);
+ clk_unprepare(udc->dc_clk);
clk_put(udc->hhc_clk);
clk_put(udc->dc_clk);
}
@@ -2773,8 +2775,8 @@ static int omap_udc_probe(struct platform_device *pdev)
hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
- clk_enable(dc_clk);
- clk_enable(hhc_clk);
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
udelay(100);
}
@@ -2783,8 +2785,8 @@ static int omap_udc_probe(struct platform_device *pdev)
hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
- clk_enable(dc_clk);
- clk_enable(hhc_clk);
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
udelay(100);
}
@@ -2932,8 +2934,8 @@ cleanup0:
usb_put_phy(xceiv);
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- clk_disable(hhc_clk);
- clk_disable(dc_clk);
+ clk_disable_unprepare(hhc_clk);
+ clk_disable_unprepare(dc_clk);
clk_put(hhc_clk);
clk_put(dc_clk);
}
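The omap_udc changes above move to the prepare-plus-enable pairing of the common clock framework. A minimal sketch of the pattern, assuming a clock named "usb_dc_ck" and a generic dev pointer; error paths are trimmed:

	/* Sketch only: clk_prepare_enable() and clk_disable_unprepare() must pair up. */
	static int example_clock_on(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, "usb_dc_ck");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
		if (ret) {
			clk_put(clk);
			return ret;
		}

		/* ... clock is running; on teardown: */
		clk_disable_unprepare(clk);	/* clk_disable() + clk_unprepare() */
		clk_put(clk);
		return 0;
	}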
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 6c414c99d01c..c593fc383481 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -44,10 +44,6 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-#endif
-
#define UDCCR 0x0000 /* UDC Control Register */
#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
@@ -1578,18 +1574,15 @@ lubbock_vbus_irq(int irq, void *_dev)
int vbus;
dev->stats.irqs++;
- switch (irq) {
- case LUBBOCK_USB_IRQ:
+ if (irq == dev->usb_irq) {
vbus = 1;
- disable_irq(LUBBOCK_USB_IRQ);
- enable_irq(LUBBOCK_USB_DISC_IRQ);
- break;
- case LUBBOCK_USB_DISC_IRQ:
+ disable_irq(dev->usb_irq);
+ enable_irq(dev->usb_disc_irq);
+ } else if (irq == dev->usb_disc_irq) {
vbus = 0;
- disable_irq(LUBBOCK_USB_DISC_IRQ);
- enable_irq(LUBBOCK_USB_IRQ);
- break;
- default:
+ disable_irq(dev->usb_disc_irq);
+ enable_irq(dev->usb_irq);
+ } else {
return IRQ_NONE;
}
@@ -2422,20 +2415,28 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_LUBBOCK
if (machine_is_lubbock()) {
- retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ,
+ dev->usb_irq = platform_get_irq(pdev, 1);
+ if (dev->usb_irq < 0)
+ return dev->usb_irq;
+
+ dev->usb_disc_irq = platform_get_irq(pdev, 2);
+ if (dev->usb_disc_irq < 0)
+ return dev->usb_disc_irq;
+
+ retval = devm_request_irq(&pdev->dev, dev->usb_disc_irq,
lubbock_vbus_irq, 0, driver_name,
dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
- driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+ driver_name, dev->usb_disc_irq, retval);
goto err;
}
- retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ,
+ retval = devm_request_irq(&pdev->dev, dev->usb_irq,
lubbock_vbus_irq, 0, driver_name,
dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
- driver_name, LUBBOCK_USB_IRQ, retval);
+ driver_name, dev->usb_irq, retval);
goto err;
}
} else
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index aa4b68fd9fc0..6ab6047edc83 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -117,16 +117,13 @@ struct pxa25x_udc {
u64 dma_mask;
struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
void __iomem *regs;
+ int usb_irq;
+ int usb_disc_irq;
};
#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-/* lubbock can also report usb connect/disconnect irqs */
-#endif
-
static struct pxa25x_udc *the_controller;
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 0a6bc18a1264..31bf79ce931c 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -326,7 +326,7 @@ struct udc_usb_ep {
* @addr: usb endpoint number
* @config: configuration in which this endpoint is active
* @interface: interface in which this endpoint is active
- * @alternate: altsetting in which this endpoitn is active
+ * @alternate: altsetting in which this endpoint is active
* @fifo_size: max packet size in the endpoint fifo
* @type: endpoint type (bulk, iso, int, ...)
* @udccsr_value: save register of UDCCSR0 for suspend/resume
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index bf803e013458..4b7eb7701470 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -126,7 +126,7 @@ struct s3c_hsudc_req {
/**
* struct s3c_hsudc - Driver's abstraction of the device controller.
* @gadget: Instance of usb_gadget which is referenced by gadget driver.
- * @driver: Reference to currenty active gadget driver.
+ * @driver: Reference to currently active gadget driver.
* @dev: The device reference used by probe function.
* @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
* @regs: Remapped base address of controller's register space.
@@ -633,7 +633,7 @@ static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
}
/** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
- * @hsudc: Device controller on which endpoint 0 interrupt has occured.
+ * @hsudc: Device controller on which endpoint 0 interrupt has occurred.
*
* Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
* when a stall handshake is sent to host or data is sent/received on
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index d9c406bdb680..6d31ccf6aee5 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -1434,7 +1434,7 @@ __tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
return 0;
}
- /* Halt DMA for this endpiont. */
+ /* Halt DMA for this endpoint. */
if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
ep_pause(xudc, ep->index);
ep_wait_for_inactive(xudc, ep->index);
@@ -3423,7 +3423,7 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
}
/*
- * Compliacne suite appears to be violating polling LFPS tBurst max
+ * Compliance suite appears to be violating polling LFPS tBurst max
* of 1.4us. Send 1.45us instead.
*/
val = xudc_readl(xudc, SSPX_CORE_CNT32);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 428c755cf2e1..4827e3cd3834 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -632,7 +632,7 @@ top:
dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n",
ep->ep_usb.name, count, is_short ? "/S" : "", req,
req->usb_req.actual, req->usb_req.length);
- bufferspace -= count;
+
/* Completion */
if ((req->usb_req.actual == req->usb_req.length) || is_short) {
if (udc->dma_enabled && req->usb_req.length)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 7f4a03e8647a..8c45bc17a580 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -61,11 +61,6 @@ static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
__raw_writel(val, base + reg);
}
-static inline u32 ehci_read(void __iomem *base, u32 reg)
-{
- return __raw_readl(base + reg);
-}
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 1115431a255d..f343967443e2 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -518,6 +518,7 @@ static struct platform_driver ehci_platform_driver = {
.pm = pm_ptr(&ehci_platform_pm_ops),
.of_match_table = vt8500_ehci_ids,
.acpi_match_table = ACPI_PTR(ehci_acpi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a2a5c2996350..1163af6fad77 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -645,7 +645,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
@@ -1218,7 +1218,7 @@ static int ehci_submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
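usb_maxpacket() lost its third argument; the direction now comes from the pipe itself, so every caller in the host drivers below reduces to the same call:

	/* Old form: usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe);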
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 67a6ee8cb5d8..3d7893747835 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -32,6 +32,8 @@
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always return 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
- "The USB host controller does not support full speed "
- "nor low speed devices\n");
+ "The USB host controller does not support full speed nor low speed devices\n");
dev_warn(hcd->self.controller,
- "You can reconfigure the host controller to have "
- "full speed support\n");
+ "You can reconfigure the host controller to have full speed support\n");
}
return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, negative error code otherwise
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
@@ -196,6 +198,8 @@ err_irq:
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
+ *
+ * Return: Always return 0
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index a8e1048278d0..2ba09c3fbc2f 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -408,8 +408,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
- % usb_maxpacket(urb->dev, pipe,
- usb_pipeout(pipe))) != 0)
+ % usb_maxpacket(urb->dev, pipe)) != 0)
size++;
break;
case PIPE_ISOCHRONOUS:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index c3fd375b4778..f8c111e08a0d 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -2596,7 +2596,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 8835f6bd528e..4f564d71bb0b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -726,7 +726,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
usb_settoggle(udev, epnum, is_out, 0);
if (type == PIPE_CONTROL) {
@@ -757,8 +757,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
ep->load = usb_calc_bus_time(udev->speed,
!is_out,
(type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe,
- is_out)) /
+ usb_maxpacket(udev, pipe)) /
1000;
}
hep->hcpriv = ep;
@@ -1541,10 +1540,12 @@ static int isp116x_remove(struct platform_device *pdev)
iounmap(isp116x->data_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
iounmap(isp116x->addr_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index d8610ce8f2ec..0e14d1d07709 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1279,7 +1279,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
ep->udev = usb_get_dev(udev);
ep->hep = hep;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->ptd_offset = -EINVAL;
ep->ptd_index = -EINVAL;
usb_settoggle(udev, epnum, is_out, 0);
@@ -1299,8 +1299,8 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
ep->interval = urb->interval;
ep->branch = PERIODIC_SIZE;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
- (type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe, is_out)) / 1000;
+ type == PIPE_ISOCHRONOUS,
+ usb_maxpacket(udev, pipe)) / 1000;
break;
}
hep->hcpriv = ep;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 99a5523a79fb..502a3ac5e35b 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -546,7 +546,7 @@ max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
return MAX3421_HXFR_BULK_OUT(epnum);
}
- max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+ max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
@@ -952,7 +952,7 @@ max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
* USB 2.0 Section 5.3.2 Pipes: packets must be full size
* except for last one.
*/
- max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+ max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
* We do not support isochronous transfers at this
@@ -998,7 +998,7 @@ max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
* max_packet as an indicator that the end of the
* packet has been reached).
*/
- u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+ u32 max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max3421_hcd->curr_len == max_packet)
return 0;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 666b1c665188..c4c821c2288c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -181,8 +181,7 @@ static int ohci_urb_enqueue (
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
- % usb_maxpacket (urb->dev, pipe,
- usb_pipeout (pipe))) == 0)
+ % usb_maxpacket(urb->dev, pipe)) == 0)
size++;
break;
case PIPE_ISOCHRONOUS: /* number of packets from URB */
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 069791d25abb..f5bc9c8bdc9a 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -259,6 +259,10 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
goto err_put_hcd;
}
+ retval = clk_prepare(priv->usb_host_ck);
+ if (retval)
+ goto err_put_host_ck;
+
if (!cpu_is_omap15xx())
priv->usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
else
@@ -266,13 +270,17 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
if (IS_ERR(priv->usb_dc_ck)) {
retval = PTR_ERR(priv->usb_dc_ck);
- goto err_put_host_ck;
+ goto err_unprepare_host_ck;
}
+ retval = clk_prepare(priv->usb_dc_ck);
+ if (retval)
+ goto err_put_dc_ck;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
retval = -EBUSY;
- goto err_put_dc_ck;
+ goto err_unprepare_dc_ck;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
@@ -297,8 +305,12 @@ err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_unprepare_dc_ck:
+ clk_unprepare(priv->usb_dc_ck);
err_put_dc_ck:
clk_put(priv->usb_dc_ck);
+err_unprepare_host_ck:
+ clk_unprepare(priv->usb_host_ck);
err_put_host_ck:
clk_put(priv->usb_host_ck);
err_put_hcd:
@@ -333,7 +345,9 @@ static int ohci_hcd_omap_remove(struct platform_device *pdev)
}
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ clk_unprepare(priv->usb_dc_ck);
clk_put(priv->usb_dc_ck);
+ clk_unprepare(priv->usb_host_ck);
clk_put(priv->usb_host_ck);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 4a8456f12a73..47dfbfe9e519 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -334,6 +334,7 @@ static struct platform_driver ohci_platform_driver = {
.name = "ohci-platform",
.pm = &ohci_platform_pm_ops,
.of_match_table = ohci_platform_ids,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 45f7cceb6df3..1960b8dfdba5 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -19,9 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
-
-
static int
ohci_ppc_of_start(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 54aa5c77e549..ab4f610a0140 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -36,8 +36,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/pxa/cpu.h>
#include "ohci.h"
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index b741670525e3..3a441310c713 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1685,7 +1685,7 @@ static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
@@ -1796,7 +1796,7 @@ static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxp = usb_maxpacket(urb->dev, urb->pipe);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
@@ -3909,8 +3909,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd)
}
}
+ spin_unlock_irq(&oxu->lock);
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
+ spin_lock_irq(&oxu->lock);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
@@ -4223,13 +4225,9 @@ static int oxu_drv_probe(struct platform_device *pdev)
/*
* Get the platform resources
*/
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
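The oxu210hp probe above switches from an IORESOURCE_IRQ lookup to platform_get_irq(), which returns either the Linux interrupt number or a negative errno and reports the failure itself. A minimal sketch, with my_irq_handler and priv as placeholders:

	/* Sketch only: fetch and request the first interrupt of a platform device. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;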
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 63719cdf6a4e..abb88dd40d4e 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1867,8 +1867,7 @@ static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
td->pipe = hep->hcpriv;
td->urb = urb;
td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
- td->maxpacket = usb_maxpacket(urb->dev, urb->pipe,
- !usb_pipein(urb->pipe));
+ td->maxpacket = usb_maxpacket(urb->dev, urb->pipe);
if (usb_pipecontrol(urb->pipe))
td->type = USB_PID_SETUP;
else if (usb_pipein(urb->pipe))
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 85623731a516..d206bd95c7bb 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -842,7 +842,7 @@ static int sl811h_urb_enqueue(
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
usb_settoggle(udev, epnum, is_out, 0);
@@ -878,8 +878,8 @@ static int sl811h_urb_enqueue(
if (type == PIPE_ISOCHRONOUS)
ep->defctrl |= SL11H_HCTLMASK_ISOCH;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
- (type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe, is_out))
+ type == PIPE_ISOCHRONOUS,
+ usb_maxpacket(udev, pipe))
/ 1000;
break;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index f65f1ba2b592..c54f2bc23d3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -707,6 +707,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
__must_hold(&xhci->lock)
{
+ struct usb_hcd *usb3_hcd = xhci_get_usb3_hcd(xhci);
int i, retval;
/* Disable all Device Slots */
@@ -727,7 +728,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
+ xhci_set_port_power(xhci, usb3_hcd, i, false, flags);
/* Power off USB2 ports*/
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bbb27ee2c6a3..8c19e151a945 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -782,14 +782,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
/***************** Device context manipulation *************************/
-static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
- struct xhci_virt_ep *ep)
-{
- timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
- 0);
- ep->xhci = xhci;
-}
-
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
@@ -994,11 +986,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
- /* Initialize the cancellation list and watchdog timers for each ep */
+ /* Initialize the cancellation and bandwidth list for each ep */
for (i = 0; i < 31; i++) {
dev->eps[i].ep_index = i;
dev->eps[i].vdev = dev;
- xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+ dev->eps[i].xhci = xhci;
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
@@ -1072,7 +1064,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
- hcd = xhci->shared_hcd;
+ hcd = xhci_get_usb3_hcd(xhci);
else
hcd = xhci->main_hcd;
@@ -2362,10 +2354,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
- /*
- * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
- * Not sure how the USB core will handle a hub with no ports...
- */
+ if (!xhci->usb2_rhub.num_ports)
+ xhci_info(xhci, "USB2 root hub has no ports\n");
+
+ if (!xhci->usb3_rhub.num_ports)
+ xhci_info(xhci, "USB3 root hub has no ports\n");
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7e0e6ebf080..fac9492a8bda 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
@@ -129,8 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "QUIRK: Fresco Logic xHC needs configure"
- " endpoint cmd after reset endpoint");
+ "XHCI_RESET_EP_QUIRK for this evaluation HW is deprecated");
}
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x4) {
@@ -268,6 +268,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 649ffd861b44..044855818cb1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -180,7 +180,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct device *sysdev, *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
- struct usb_hcd *hcd;
+ struct usb_hcd *hcd, *usb3_hcd;
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
@@ -245,6 +245,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
xhci = hcd_to_xhci(hcd);
+ xhci->allow_single_roothub = 1;
+
/*
* Not all platforms have clks so it is not an error if the
* clock do not exist.
@@ -283,12 +285,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
xhci->main_hcd = hcd;
- xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
- dev_name(&pdev->dev), hcd);
- if (!xhci->shared_hcd) {
- ret = -ENOMEM;
- goto disable_clk;
- }
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
@@ -313,16 +309,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
- goto put_usb3_hcd;
+ goto disable_clk;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
- goto put_usb3_hcd;
+ goto disable_clk;
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- xhci->shared_hcd->tpl_support = hcd->tpl_support;
+
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
@@ -333,12 +329,26 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto disable_usb_phy;
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
- xhci->shared_hcd->can_do_streams = 1;
+ if (!xhci_has_one_roothub(xhci)) {
+ xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+ dev_name(&pdev->dev), hcd);
+ if (!xhci->shared_hcd) {
+ ret = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
- ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
- if (ret)
- goto dealloc_usb2_hcd;
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
+ }
+
+ usb3_hcd = xhci_get_usb3_hcd(xhci);
+ if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ usb3_hcd->can_do_streams = 1;
+
+ if (xhci->shared_hcd) {
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto put_usb3_hcd;
+ }
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -352,15 +362,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
return 0;
+put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
disable_usb_phy:
usb_phy_shutdown(hcd->usb_phy);
-put_usb3_hcd:
- usb_put_hcd(xhci->shared_hcd);
-
disable_clk:
clk_disable_unprepare(xhci->clk);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f9707997969d..46d0b9ad6f74 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -740,14 +740,6 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
-static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
- struct xhci_virt_ep *ep)
-{
- ep->ep_state &= ~EP_STOP_CMD_PENDING;
- /* Can't del_timer_sync in interrupt */
- del_timer(&ep->stop_cmd_timer);
-}
-
/*
* Must be called with xhci->lock held in interrupt context,
* releases and re-acquires xhci->lock
@@ -1122,18 +1114,17 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
reset_type);
if (err)
break;
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
-
- mod_timer(&ep->stop_cmd_timer,
- jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
+ if (!command) {
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+ return;
+ }
xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
xhci_ring_cmd_db(xhci);
@@ -1142,9 +1133,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
}
}
+
/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
xhci_invalidate_cancelled_tds(ep);
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
/* Otherwise ring the doorbell(s) to restart queued transfers */
xhci_giveback_invalidated_tds(ep);
@@ -1248,61 +1240,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
-/* Watchdog timer function for when a stop endpoint command fails to complete.
- * In this case, we assume the host controller is broken or dying or dead. The
- * host may still be completing some other events, so we have to be careful to
- * let the event ring handler and the URB dequeueing/enqueueing functions know
- * through xhci->state.
- *
- * The timer may also fire if the host takes a very long time to respond to the
- * command, and the stop endpoint command completion handler cannot delete the
- * timer before the timer function is called. Another endpoint cancellation may
- * sneak in before the timer function can grab the lock, and that may queue
- * another stop endpoint command and add the timer back. So we cannot use a
- * simple flag to say whether there is a pending stop endpoint command for a
- * particular endpoint.
- *
- * Instead we use a combination of that flag and checking if a new timer is
- * pending.
- */
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
-{
- struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
- struct xhci_hcd *xhci = ep->xhci;
- unsigned long flags;
- u32 usbsts;
- char str[XHCI_MSG_MAX];
-
- spin_lock_irqsave(&xhci->lock, flags);
-
- /* bail out if cmd completed but raced with stop ep watchdog timer.*/
- if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
- timer_pending(&ep->stop_cmd_timer)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
- return;
- }
- usbsts = readl(&xhci->op_regs->status);
-
- xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
- xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
-
- ep->ep_state &= ~EP_STOP_CMD_PENDING;
-
- xhci_halt(xhci);
-
- /*
- * handle a stop endpoint cmd timeout as if host died (-ENODEV).
- * In the future we could distinguish between -ENODEV and -ETIMEDOUT
- * and try to recover a -ETIMEDOUT with a host controller reset
- */
- xhci_hc_died(xhci);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "xHCI host controller is dead.");
-}
-
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
@@ -1489,8 +1426,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
xhci_invalidate_cancelled_tds(ep);
- if (xhci->quirks & XHCI_RESET_EP_QUIRK)
- xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
/* Clear our internal halted state */
ep->ep_state &= ~EP_HALTED;
@@ -1534,17 +1469,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
- unsigned int ep_state;
- u32 add_flags, drop_flags;
+ u32 add_flags;
/*
- * Configure endpoint commands can come from the USB core
- * configuration or alt setting changes, or because the HW
- * needed an extra configure endpoint command after a reset
- * endpoint command or streams were being configured.
- * If the command was for a halted endpoint, the xHCI driver
- * is not waiting on the configure endpoint command.
+ * Configure endpoint commands can come from the USB core configuration
+ * or alt setting changes, or when streams were being configured.
*/
+
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return;
@@ -1555,34 +1486,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
}
add_flags = le32_to_cpu(ctrl_ctx->add_flags);
- drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(add_flags) - 1;
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
trace_xhci_handle_cmd_config_ep(ep_ctx);
- /* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware. Not worth
- * worrying about, since this is prototype hardware. Not sure
- * if this will work for streams, but streams support was
- * untested on this prototype.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_index != (unsigned int) -1 &&
- add_flags - SLOT_FLAG == drop_flags) {
- ep_state = virt_dev->eps[ep_index].ep_state;
- if (!(ep_state & EP_HALTED))
- return;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Completed config ep cmd - "
- "last ep index = %d, state = %d",
- ep_index, ep_state);
- /* Clear internal halted state and restart ring(s) */
- virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- return;
- }
return;
}
@@ -1650,9 +1560,12 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
void xhci_handle_command_timeout(struct work_struct *work)
{
- struct xhci_hcd *xhci;
- unsigned long flags;
- u64 hw_ring_state;
+ struct xhci_hcd *xhci;
+ unsigned long flags;
+ char str[XHCI_MSG_MAX];
+ u64 hw_ring_state;
+ u32 cmd_field3;
+ u32 usbsts;
xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
@@ -1666,6 +1579,27 @@ void xhci_handle_command_timeout(struct work_struct *work)
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
+
+ cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
+ usbsts = readl(&xhci->op_regs->status);
+ xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
+
+ /* Bail out and tear down xhci if a stop endpoint command failed */
+ if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
+ struct xhci_virt_ep *ep;
+
+ xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
+
+ ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
+ TRB_TO_EP_INDEX(cmd_field3));
+ if (ep)
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+
+ xhci_halt(xhci);
+ xhci_hc_died(xhci);
+ goto time_out_completed;
+ }
+
/* mark this command to be cancelled */
xhci->current_cmd->status = COMP_COMMAND_ABORTED;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 25b87e99b4dd..f0ab63138016 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -486,6 +486,10 @@ static void compliance_mode_recovery(struct timer_list *t)
xhci = from_timer(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
+ hcd = rhub->hcd;
+
+ if (!hcd)
+ return;
for (i = 0; i < rhub->num_ports; i++) {
temp = readl(rhub->ports[i]->addr);
@@ -499,7 +503,6 @@ static void compliance_mode_recovery(struct timer_list *t)
i + 1);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Attempting compliance mode recovery");
- hcd = xhci->shared_hcd;
if (hcd->state == HC_STATE_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
@@ -612,14 +615,11 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
xhci_halt(xhci);
return -ENODEV;
}
- xhci->shared_hcd->state = HC_STATE_RUNNING;
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Finished xhci_run for USB3 roothub");
return 0;
}
@@ -694,12 +694,17 @@ int xhci_run(struct usb_hcd *hcd)
xhci_free_command(xhci, command);
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Finished xhci_run for USB2 roothub");
+ "Finished %s for main hcd", __func__);
xhci_create_dbc_dev(xhci);
xhci_debugfs_init(xhci);
+ if (xhci_has_one_roothub(xhci))
+ return xhci_run_finished(xhci);
+
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
@@ -992,7 +997,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
- xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
return -EINVAL;
/* Clear root port wake on bits if wakeup not allowed. */
@@ -1009,15 +1014,18 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
- clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- del_timer_sync(&xhci->shared_hcd->rh_timer);
+ if (xhci->shared_hcd) {
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
if (xhci->quirks & XHCI_SUSPEND_DELAY)
usleep_range(1000, 1500);
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+ if (xhci->shared_hcd)
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
/* skipped assuming that port suspend has done */
@@ -1117,7 +1125,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
msleep(100);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+ if (xhci->shared_hcd)
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
@@ -1177,7 +1186,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
- usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
@@ -1217,12 +1227,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
- if (!retval) {
+ if (!retval && secondary_hcd) {
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(secondary_hcd);
}
hcd->state = HC_STATE_SUSPENDED;
- xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ if (xhci->shared_hcd)
+ xhci->shared_hcd->state = HC_STATE_SUSPENDED;
goto done;
}
@@ -1260,7 +1271,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
}
if (pending_portevent) {
- usb_hcd_resume_root_hub(xhci->shared_hcd);
+ if (xhci->shared_hcd)
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
}
@@ -1279,8 +1291,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
__func__, hcd->self.busnum);
- set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- usb_hcd_poll_rh_status(xhci->shared_hcd);
+ if (xhci->shared_hcd) {
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
+ }
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
@@ -1860,9 +1874,6 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
ep->ep_state |= EP_STOP_CMD_PENDING;
- ep->stop_cmd_timer.expires = jiffies +
- XHCI_STOP_EP_CMD_TIMEOUT * HZ;
- add_timer(&ep->stop_cmd_timer);
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
xhci_ring_cmd_db(xhci);
@@ -3972,10 +3983,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
trace_xhci_free_dev(slot_ctx);
/* Stop any wayward timer functions (which may grab the lock) */
- for (i = 0; i < 31; i++) {
+ for (i = 0; i < 31; i++)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
- del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
- }
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
@@ -4879,9 +4888,6 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
struct usb_device *parent;
unsigned int num_hubs;
- if (state == USB3_LPM_U2)
- return 0;
-
/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
for (parent = udev->parent, num_hubs = 0; parent->parent;
parent = parent->parent)
@@ -4890,7 +4896,7 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
if (num_hubs < 2)
return 0;
- dev_dbg(&udev->dev, "Disabling U1 link state for device"
+ dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
" below second-tier hub.\n");
dev_dbg(&udev->dev, "Plug device into first-tier hub "
"to decrease power consumption.\n");
@@ -4931,9 +4937,6 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
return timeout;
}
- if (xhci_check_tier_policy(xhci, udev, state) < 0)
- return timeout;
-
/* Gather some information about the currently installed configuration
* and alternate interface settings.
*/
@@ -5040,6 +5043,9 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
!xhci->devs[udev->slot_id])
return USB3_LPM_DISABLED;
+ if (xhci_check_tier_policy(xhci, udev, state) < 0)
+ return USB3_LPM_DISABLED;
+
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
@@ -5207,6 +5213,57 @@ static int xhci_get_frame(struct usb_hcd *hcd)
return readl(&xhci->run_regs->microframe_index) >> 3;
}
+static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+ xhci->usb2_rhub.hcd = hcd;
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ /*
+ * USB 2.0 roothub under xHCI has an integrated TT,
+ * (rate matching hub) as opposed to having an OHCI/UHCI
+ * companion controller.
+ */
+ hcd->has_tt = 1;
+}
+
+static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+ unsigned int minor_rev;
+
+ /*
+ * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+ * should return 0x31 for sbrn, or that the minor revision
+ * is a two-digit BCD containing minor and sub-minor numbers.
+ * This was later clarified in xHCI 1.2.
+ *
+ * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+ * minor revision set to 0x1 instead of 0x10.
+ */
+ if (xhci->usb3_rhub.min_rev == 0x1)
+ minor_rev = 1;
+ else
+ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+
+ switch (minor_rev) {
+ case 2:
+ hcd->speed = HCD_USB32;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ hcd->self.root_hub->rx_lanes = 2;
+ hcd->self.root_hub->tx_lanes = 2;
+ hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
+ break;
+ case 1:
+ hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ }
+ xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
+ minor_rev, minor_rev ? "Enhanced " : "");
+
+ xhci->usb3_rhub.hcd = hcd;
+}
+
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
@@ -5215,7 +5272,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
* quirks
*/
struct device *dev = hcd->self.sysdev;
- unsigned int minor_rev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
@@ -5229,61 +5285,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci = hcd_to_xhci(hcd);
- if (usb_hcd_is_primary_hcd(hcd)) {
- xhci->main_hcd = hcd;
- xhci->usb2_rhub.hcd = hcd;
- /* Mark the first roothub as being USB 2.0.
- * The xHCI driver will register the USB 3.0 roothub.
- */
- hcd->speed = HCD_USB2;
- hcd->self.root_hub->speed = USB_SPEED_HIGH;
- /*
- * USB 2.0 roothub under xHCI has an integrated TT,
- * (rate matching hub) as opposed to having an OHCI/UHCI
- * companion controller.
- */
- hcd->has_tt = 1;
- } else {
- /*
- * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
- * should return 0x31 for sbrn, or that the minor revision
- * is a two digit BCD containig minor and sub-minor numbers.
- * This was later clarified in xHCI 1.2.
- *
- * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
- * minor revision set to 0x1 instead of 0x10.
- */
- if (xhci->usb3_rhub.min_rev == 0x1)
- minor_rev = 1;
- else
- minor_rev = xhci->usb3_rhub.min_rev / 0x10;
-
- switch (minor_rev) {
- case 2:
- hcd->speed = HCD_USB32;
- hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
- hcd->self.root_hub->rx_lanes = 2;
- hcd->self.root_hub->tx_lanes = 2;
- hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
- break;
- case 1:
- hcd->speed = HCD_USB31;
- hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
- hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
- break;
- }
- xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
- minor_rev,
- minor_rev ? "Enhanced " : "");
-
- xhci->usb3_rhub.hcd = hcd;
- /* xHCI private pointer was set in xhci_pci_probe for the second
- * registered roothub.
- */
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ xhci_hcd_init_usb3_data(xhci, hcd);
return 0;
}
mutex_init(&xhci->mutex);
+ xhci->main_hcd = hcd;
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5358,6 +5366,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Called HCD init\n");
+ if (xhci_hcd_is_usb3(hcd))
+ xhci_hcd_init_usb3_data(xhci, hcd);
+ else
+ xhci_hcd_init_usb2_data(xhci, hcd);
+
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
xhci->hcc_params, xhci->hci_version, xhci->quirks);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 473a33ce299e..0bd76c94a4b1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -948,8 +948,6 @@ struct xhci_virt_ep {
#define EP_CLEARING_TT (1 << 8)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- /* Watchdog timer for stop endpoint command to cancel URBs */
- struct timer_list stop_cmd_timer;
struct xhci_hcd *xhci;
/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
* command. We'll need to update the ring's dequeue segment and dequeue
@@ -1848,7 +1846,7 @@ struct xhci_hcd {
#define XHCI_STATE_REMOVING (1 << 2)
unsigned long long quirks;
#define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
-#define XHCI_RESET_EP_QUIRK BIT_ULL(1)
+#define XHCI_RESET_EP_QUIRK BIT_ULL(1) /* Deprecated */
#define XHCI_NEC_HOST BIT_ULL(2)
#define XHCI_AMD_PLL_FIX BIT_ULL(3)
#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4)
@@ -1911,6 +1909,8 @@ struct xhci_hcd {
unsigned hw_lpm_support:1;
/* Broken Suspend flag for SNPS Suspend resume issue */
unsigned broken_suspend:1;
+ /* Indicates that omitting an hcd is supported if its root hub has no ports */
+ unsigned allow_single_roothub:1;
 /* cached usb2 extended protocol capabilities */
u32 *ext_caps;
unsigned int num_ext_caps;
@@ -1966,6 +1966,30 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
return xhci->main_hcd;
}
+static inline struct usb_hcd *xhci_get_usb3_hcd(struct xhci_hcd *xhci)
+{
+ if (xhci->shared_hcd)
+ return xhci->shared_hcd;
+
+ if (!xhci->usb2_rhub.num_ports)
+ return xhci->main_hcd;
+
+ return NULL;
+}
+
+static inline bool xhci_hcd_is_usb3(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return hcd == xhci_get_usb3_hcd(xhci);
+}
+
+static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
+{
+ return xhci->allow_single_roothub &&
+ (!xhci->usb2_rhub.num_ports || !xhci->usb3_rhub.num_ports);
+}
+
#define xhci_dbg(xhci, fmt, args...) \
dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_err(xhci, fmt, args...) \
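For reference, this is roughly how a host-glue driver can take advantage of the new single-roothub support; the probe flow and the driver/dev/hcd names below are assumptions for illustration, not code from this series:

	/* Opt in: a roothub hcd may be skipped when it would have no ports. */
	xhci->allow_single_roothub = 1;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		return ret;

	/* Port counts are known once the primary hcd has been added. */
	if (!xhci_has_one_roothub(xhci)) {
		xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
							 dev_name(dev), hcd);
		if (!xhci->shared_hcd)
			return -ENOMEM;
	}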
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index d1d9a7d5da17..af88f4fe00d2 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -251,6 +251,8 @@ static const struct reg_field isp1760_hc_reg_fields[] = {
[HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
[HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
[HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct reg_field isp1763_hc_reg_fields[] = {
@@ -321,6 +323,8 @@ static const struct reg_field isp1763_hc_reg_fields[] = {
[HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
[HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
[HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_hc_volatile_ranges[] = {
@@ -405,6 +409,8 @@ static const struct reg_field isp1761_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_dc_volatile_ranges[] = {
@@ -458,6 +464,8 @@ static const struct reg_field isp1763_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_config isp1763_dc_regmap_conf = {
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 893becb077d3..76862ba40f35 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -825,8 +825,7 @@ static void create_ptd_atl(struct isp1760_qh *qh,
memset(ptd, 0, sizeof(*ptd));
/* according to 3.6.2, max packet len can not be > 0x400 */
- maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
- usb_pipeout(qtd->urb->pipe));
+ maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe);
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
@@ -1808,8 +1807,7 @@ static void packetize_urb(struct usb_hcd *hcd,
packet_type = IN_PID;
}
- maxpacketsize = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ maxpacketsize = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 6c38c62d29b2..b2f980409d0b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -1449,8 +1449,7 @@ wait:if (ftdi->disconnected > 0) {
command->length = 0x8007;
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 8;
command->value = 0;
command->buffer = urb->setup_packet;
@@ -1514,8 +1513,7 @@ wait:if (ftdi->disconnected > 0) {
1);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
@@ -1571,8 +1569,7 @@ wait:if (ftdi->disconnected > 0) {
command->length = 0x0000;
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
@@ -1634,8 +1631,7 @@ wait:if (ftdi->disconnected > 0) {
command->header = 0x81 | (ed << 5);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = min_t(u32, 1024,
urb->transfer_buffer_length -
urb->actual_length);
@@ -1715,8 +1711,7 @@ wait:if (ftdi->disconnected > 0) {
1);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index f8686139d6f3..25ec5666a75e 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -437,7 +437,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
INIT_WORK(&lvs->rh_work, lvs_rh_work);
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(hdev, pipe);
usb_fill_int_urb(lvs->urb, hdev, pipe, &lvs->buffer[0], maxp,
lvs_rh_irq, lvs, endpoint->bInterval);
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 1aeb34dbe24f..cad991380b0c 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -36,6 +36,8 @@
#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+#define MTK_MUSB_CLKS_NUM 3
+
struct mtk_glue {
struct device *dev;
struct musb *musb;
@@ -44,9 +46,7 @@ struct mtk_glue {
struct phy *phy;
struct usb_phy *xceiv;
enum phy_mode phy_mode;
- struct clk *main;
- struct clk *mcu;
- struct clk *univpll;
+ struct clk_bulk_data clks[MTK_MUSB_CLKS_NUM];
enum usb_role role;
struct usb_role_switch *role_sw;
};
@@ -55,64 +55,11 @@ static int mtk_musb_clks_get(struct mtk_glue *glue)
{
struct device *dev = glue->dev;
- glue->main = devm_clk_get(dev, "main");
- if (IS_ERR(glue->main)) {
- dev_err(dev, "fail to get main clock\n");
- return PTR_ERR(glue->main);
- }
-
- glue->mcu = devm_clk_get(dev, "mcu");
- if (IS_ERR(glue->mcu)) {
- dev_err(dev, "fail to get mcu clock\n");
- return PTR_ERR(glue->mcu);
- }
-
- glue->univpll = devm_clk_get(dev, "univpll");
- if (IS_ERR(glue->univpll)) {
- dev_err(dev, "fail to get univpll clock\n");
- return PTR_ERR(glue->univpll);
- }
-
- return 0;
-}
+ glue->clks[0].id = "main";
+ glue->clks[1].id = "mcu";
+ glue->clks[2].id = "univpll";
-static int mtk_musb_clks_enable(struct mtk_glue *glue)
-{
- int ret;
-
- ret = clk_prepare_enable(glue->main);
- if (ret) {
- dev_err(glue->dev, "failed to enable main clock\n");
- goto err_main_clk;
- }
-
- ret = clk_prepare_enable(glue->mcu);
- if (ret) {
- dev_err(glue->dev, "failed to enable mcu clock\n");
- goto err_mcu_clk;
- }
-
- ret = clk_prepare_enable(glue->univpll);
- if (ret) {
- dev_err(glue->dev, "failed to enable univpll clock\n");
- goto err_univpll_clk;
- }
-
- return 0;
-
-err_univpll_clk:
- clk_disable_unprepare(glue->mcu);
-err_mcu_clk:
- clk_disable_unprepare(glue->main);
-err_main_clk:
- return ret;
-}
-
-static void mtk_musb_clks_disable(struct mtk_glue *glue)
-{
- clk_disable_unprepare(glue->univpll);
- clk_disable_unprepare(glue->mcu);
- clk_disable_unprepare(glue->main);
+ return devm_clk_bulk_get(dev, MTK_MUSB_CLKS_NUM, glue->clks);
}
static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
@@ -390,7 +337,7 @@ static int mtk_musb_exit(struct musb *musb)
mtk_otg_switch_exit(glue);
phy_power_off(glue->phy);
phy_exit(glue->phy);
- mtk_musb_clks_disable(glue);
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -528,7 +475,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ret = mtk_musb_clks_enable(glue);
+ ret = clk_bulk_prepare_enable(MTK_MUSB_CLKS_NUM, glue->clks);
if (ret)
goto err_enable_clk;
@@ -551,7 +498,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
return 0;
err_device_register:
- mtk_musb_clks_disable(glue);
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
err_enable_clk:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
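The conversion above follows the generic clk_bulk pattern; a minimal sketch of that pattern on its own, with illustrative names not taken from this driver:

	#include <linux/clk.h>

	static struct clk_bulk_data example_clks[] = {
		{ .id = "main" },
		{ .id = "mcu" },
		{ .id = "univpll" },
	};

	static int example_clks_init(struct device *dev)
	{
		int ret;

		/* Look up all clocks in one call; devm_ releases them on detach. */
		ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
		if (ret)
			return ret;

		/* Prepare and enable the whole set; failures unwind internally. */
		return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
	}

	/* Teardown mirrors it: clk_bulk_disable_unprepare(ARRAY_SIZE(example_clks), example_clks); */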
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d2b7e613eb34..f571a65ae6ee 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_probe(struct platform_device *pdev)
control_node = of_parse_phandle(np, "ctrl-module", 0);
if (control_node) {
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
ret = -EINVAL;
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index ee0863c6553e..6e6ef8c0bc7e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev)
return -ENODEV;
extcon = extcon_get_extcon_dev(config->extcon);
- if (!extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(extcon))
+ return PTR_ERR(extcon);
otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
if (!otg_dev)
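This (and the matching fusb302 change further down) adapts to extcon_get_extcon_dev() reporting an unavailable device through an ERR_PTR rather than NULL; a minimal consumer sketch of the new convention, with an assumed cable name:

	struct extcon_dev *edev;

	edev = extcon_get_extcon_dev("example-extcon");
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* may be -EPROBE_DEFER */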
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index c0e4df87ff22..39eaa7b97c40 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -208,10 +208,9 @@ static void ark3116_set_termios(struct tty_struct *tty,
lcr |= UART_LCR_PARITY;
if (!(cflag & PARODD))
lcr |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
+
/* handshake control */
hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 49c08f07c969..b440d338a895 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1671,7 +1671,7 @@ static ssize_t latency_timer_show(struct device *dev,
if (priv->flags & ASYNC_LOW_LATENCY)
return sprintf(buf, "1\n");
else
- return sprintf(buf, "%i\n", priv->latency);
+ return sprintf(buf, "%u\n", priv->latency);
}
/* Write a new value of the latency timer, in units of milliseconds. */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 152ad882657d..e60425bbf537 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1137,6 +1137,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1d878d05a658..3506c47e1eef 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -421,6 +421,9 @@ static int pl2303_detect_type(struct usb_serial *serial)
bcdUSB = le16_to_cpu(desc->bcdUSB);
switch (bcdUSB) {
+ case 0x101:
+ /* USB 1.0.1? Let's assume they meant 1.1... */
+ fallthrough;
case 0x110:
switch (bcdDevice) {
case 0x300:
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 06aad0d727dd..332fb92ae575 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -30,10 +30,6 @@
#include <linux/usb/ezusb.h>
#include "whiteheat.h" /* WhiteHEAT specific commands */
-#ifndef CMSPAR
-#define CMSPAR 0
-#endif
-
/*
* Version Information
*/
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 20b857e97e60..747be69e5e69 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -1104,7 +1104,7 @@ static int init_alauda(struct us_data *us)
us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO);
if (!us->extra)
- return USB_STOR_TRANSPORT_ERROR;
+ return -ENOMEM;
info = (struct alauda_info *) us->extra;
us->extra_destructor = alauda_info_destructor;
@@ -1113,7 +1113,7 @@ static int init_alauda(struct us_data *us)
altsetting->endpoint[0].desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 05429f1f69f9..4e0eef1440b7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1449,7 +1449,7 @@ static void isd200_free_info_ptrs(void *info_)
* Allocates (if necessary) and initializes the driver structure.
*
* RETURNS:
- * ISD status code
+ * error status code
*/
static int isd200_init_info(struct us_data *us)
{
@@ -1457,7 +1457,7 @@ static int isd200_init_info(struct us_data *us)
info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
if (!info)
- return ISD200_ERROR;
+ return -ENOMEM;
info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
@@ -1466,13 +1466,13 @@ static int isd200_init_info(struct us_data *us)
if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
isd200_free_info_ptrs(info);
kfree(info);
- return ISD200_ERROR;
+ return -ENOMEM;
}
us->extra = info;
us->extra_destructor = isd200_free_info_ptrs;
- return ISD200_GOOD;
+ return 0;
}
/**************************************************************************
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 05cec81dcd3f..38ddfedef629 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra)
static int rio_karma_init(struct us_data *us)
{
- int ret = 0;
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
if (!data)
- goto out;
+ return -ENOMEM;
data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
if (!data->recv) {
kfree(data);
- goto out;
+ return -ENOMEM;
}
us->extra = data;
us->extra_destructor = rio_karma_destructor;
- ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
- data->in_storage = (ret == 0);
-out:
- return ret;
+ if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
+ return -EIO;
+
+ data->in_storage = 1;
+
+ return 0;
}
static struct scsi_host_template karma_host_template;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index a989fe930e21..1db2eefeea22 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -180,7 +180,7 @@ static int onetouch_connect_input(struct us_data *ss)
return -ENODEV;
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = min(maxp, ONETOUCH_PKT_LEN);
onetouch = kzalloc(sizeof(struct usb_onetouch), GFP_KERNEL);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 54aa1392c9ca..f0d0ca37163d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1456,7 +1456,7 @@ static int init_usbat(struct us_data *us, int devicetype)
us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO);
if (!us->extra)
- return 1;
+ return -ENOMEM;
info = (struct usbat_info *) (us->extra);
@@ -1465,7 +1465,7 @@ static int init_usbat(struct us_data *us, int devicetype)
USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 1\n");
@@ -1473,42 +1473,42 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 2\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 3\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 4\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 5\n");
/* Enable peripheral control signals and card detect */
rc = usbat_device_enable_cdt(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 6\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 7\n");
@@ -1516,19 +1516,19 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 8\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 9\n");
/* At this point, we need to detect which device we are using */
if (usbat_set_transport(us, info, devicetype))
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 10\n");
@@ -1539,11 +1539,11 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1),
0x00, 0x88, 0x08, subcountH, subcountL);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 11\n");
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
/*
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1928b3918242..64d96d210e02 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -363,7 +363,7 @@ static int usb_stor_intr_transfer(struct us_data *us, void *buf,
usb_stor_dbg(us, "xfer %u bytes\n", length);
/* calculate the max packet size */
- maxp = usb_maxpacket(us->pusb_dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(us->pusb_dev, pipe);
if (maxp > length)
maxp = length;
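Every usb_maxpacket() call site touched in this series drops the old third argument, since the endpoint direction is already encoded in the pipe; the before/after shape is simply:

	/* before */
	maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));

	/* after: the direction is derived from the pipe itself */
	maxp = usb_maxpacket(udev, pipe);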
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 78e0e78954f2..26ea2fdec17d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -24,7 +24,7 @@ typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
state.mode = conf;
state.data = data;
- return alt->mux->set(alt->mux, &state);
+ return typec_mux_set(alt->mux, &state);
}
static int typec_altmode_set_state(struct typec_altmode *adev,
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index c8340de0ed49..fd55c2c516a5 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -17,9 +17,16 @@
#include "class.h"
#include "mux.h"
+#define TYPEC_MUX_MAX_DEVS 3
+
+struct typec_switch {
+ struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
+ unsigned int num_sw_devs;
+};
+
static int switch_fwnode_match(struct device *dev, const void *fwnode)
{
- if (!is_typec_switch(dev))
+ if (!is_typec_switch_dev(dev))
return 0;
return dev_fwnode(dev) == fwnode;
@@ -49,7 +56,7 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_switch_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -63,14 +70,50 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
*/
struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
{
+ struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
struct typec_switch *sw;
+ int count;
+ int err;
+ int i;
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
+
+ count = fwnode_connection_find_matches(fwnode, "orientation-switch", NULL,
+ typec_switch_match,
+ (void **)sw_devs,
+ ARRAY_SIZE(sw_devs));
+ if (count <= 0) {
+ kfree(sw);
+ return NULL;
+ }
- sw = fwnode_connection_find_match(fwnode, "orientation-switch", NULL,
- typec_switch_match);
- if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ for (i = 0; i < count; i++) {
+ if (IS_ERR(sw_devs[i])) {
+ err = PTR_ERR(sw_devs[i]);
+ goto put_sw_devs;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(!try_module_get(sw_devs[i]->dev.parent->driver->owner));
+ sw->sw_devs[i] = sw_devs[i];
+ }
+
+ sw->num_sw_devs = count;
return sw;
+
+put_sw_devs:
+ for (i = 0; i < count; i++) {
+ if (!IS_ERR(sw_devs[i]))
+ put_device(&sw_devs[i]->dev);
+ }
+
+ kfree(sw);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
@@ -82,16 +125,25 @@ EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
*/
void typec_switch_put(struct typec_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
- put_device(&sw->dev);
+ struct typec_switch_dev *sw_dev;
+ unsigned int i;
+
+ if (IS_ERR_OR_NULL(sw))
+ return;
+
+ for (i = 0; i < sw->num_sw_devs; i++) {
+ sw_dev = sw->sw_devs[i];
+
+ module_put(sw_dev->dev.parent->driver->owner);
+ put_device(&sw_dev->dev);
}
+ kfree(sw);
}
EXPORT_SYMBOL_GPL(typec_switch_put);
static void typec_switch_release(struct device *dev)
{
- kfree(to_typec_switch(dev));
+ kfree(to_typec_switch_dev(dev));
}
const struct device_type typec_switch_dev_type = {
@@ -109,82 +161,102 @@ const struct device_type typec_switch_dev_type = {
* connector to the USB controllers. USB Type-C plugs can be inserted
* right-side-up or upside-down.
*/
-struct typec_switch *
+struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc)
{
- struct typec_switch *sw;
+ struct typec_switch_dev *sw_dev;
int ret;
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
- sw = kzalloc(sizeof(*sw), GFP_KERNEL);
- if (!sw)
+ sw_dev = kzalloc(sizeof(*sw_dev), GFP_KERNEL);
+ if (!sw_dev)
return ERR_PTR(-ENOMEM);
- sw->set = desc->set;
+ sw_dev->set = desc->set;
- device_initialize(&sw->dev);
- sw->dev.parent = parent;
- sw->dev.fwnode = desc->fwnode;
- sw->dev.class = &typec_mux_class;
- sw->dev.type = &typec_switch_dev_type;
- sw->dev.driver_data = desc->drvdata;
- dev_set_name(&sw->dev, "%s-switch",
- desc->name ? desc->name : dev_name(parent));
+ device_initialize(&sw_dev->dev);
+ sw_dev->dev.parent = parent;
+ sw_dev->dev.fwnode = desc->fwnode;
+ sw_dev->dev.class = &typec_mux_class;
+ sw_dev->dev.type = &typec_switch_dev_type;
+ sw_dev->dev.driver_data = desc->drvdata;
+ ret = dev_set_name(&sw_dev->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&sw_dev->dev);
+ return ERR_PTR(ret);
+ }
- ret = device_add(&sw->dev);
+ ret = device_add(&sw_dev->dev);
if (ret) {
dev_err(parent, "failed to register switch (%d)\n", ret);
- put_device(&sw->dev);
+ put_device(&sw_dev->dev);
return ERR_PTR(ret);
}
- return sw;
+ return sw_dev;
}
EXPORT_SYMBOL_GPL(typec_switch_register);
int typec_switch_set(struct typec_switch *sw,
enum typec_orientation orientation)
{
+ struct typec_switch_dev *sw_dev;
+ unsigned int i;
+ int ret;
+
if (IS_ERR_OR_NULL(sw))
return 0;
- return sw->set(sw, orientation);
+ for (i = 0; i < sw->num_sw_devs; i++) {
+ sw_dev = sw->sw_devs[i];
+
+ ret = sw_dev->set(sw_dev, orientation);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_switch_set);
/**
* typec_switch_unregister - Unregister USB Type-C orientation switch
- * @sw: USB Type-C orientation switch
+ * @sw_dev: USB Type-C orientation switch
*
* Unregister switch that was registered with typec_switch_register().
*/
-void typec_switch_unregister(struct typec_switch *sw)
+void typec_switch_unregister(struct typec_switch_dev *sw_dev)
{
- if (!IS_ERR_OR_NULL(sw))
- device_unregister(&sw->dev);
+ if (!IS_ERR_OR_NULL(sw_dev))
+ device_unregister(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_unregister);
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data)
+void typec_switch_set_drvdata(struct typec_switch_dev *sw_dev, void *data)
{
- dev_set_drvdata(&sw->dev, data);
+ dev_set_drvdata(&sw_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_switch_set_drvdata);
-void *typec_switch_get_drvdata(struct typec_switch *sw)
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw_dev)
{
- return dev_get_drvdata(&sw->dev);
+ return dev_get_drvdata(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
/* ------------------------------------------------------------------------- */
+struct typec_mux {
+ struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
+ unsigned int num_mux_devs;
+};
+
static int mux_fwnode_match(struct device *dev, const void *fwnode)
{
- if (!is_typec_mux(dev))
+ if (!is_typec_mux_dev(dev))
return 0;
return dev_fwnode(dev) == fwnode;
@@ -246,7 +318,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -262,14 +334,50 @@ find_mux:
struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
const struct typec_altmode_desc *desc)
{
+ struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
struct typec_mux *mux;
+ int count;
+ int err;
+ int i;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ count = fwnode_connection_find_matches(fwnode, "mode-switch",
+ (void *)desc, typec_mux_match,
+ (void **)mux_devs,
+ ARRAY_SIZE(mux_devs));
+ if (count <= 0) {
+ kfree(mux);
+ return NULL;
+ }
- mux = fwnode_connection_find_match(fwnode, "mode-switch", (void *)desc,
- typec_mux_match);
- if (!IS_ERR_OR_NULL(mux))
- WARN_ON(!try_module_get(mux->dev.parent->driver->owner));
+ for (i = 0; i < count; i++) {
+ if (IS_ERR(mux_devs[i])) {
+ err = PTR_ERR(mux_devs[i]);
+ goto put_mux_devs;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(!try_module_get(mux_devs[i]->dev.parent->driver->owner));
+ mux->mux_devs[i] = mux_devs[i];
+ }
+
+ mux->num_mux_devs = count;
return mux;
+
+put_mux_devs:
+ for (i = 0; i < count; i++) {
+ if (!IS_ERR(mux_devs[i]))
+ put_device(&mux_devs[i]->dev);
+ }
+
+ kfree(mux);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
@@ -281,25 +389,45 @@ EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
*/
void typec_mux_put(struct typec_mux *mux)
{
- if (!IS_ERR_OR_NULL(mux)) {
- module_put(mux->dev.parent->driver->owner);
- put_device(&mux->dev);
+ struct typec_mux_dev *mux_dev;
+ unsigned int i;
+
+ if (IS_ERR_OR_NULL(mux))
+ return;
+
+ for (i = 0; i < mux->num_mux_devs; i++) {
+ mux_dev = mux->mux_devs[i];
+ module_put(mux_dev->dev.parent->driver->owner);
+ put_device(&mux_dev->dev);
}
+ kfree(mux);
}
EXPORT_SYMBOL_GPL(typec_mux_put);
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
+ struct typec_mux_dev *mux_dev;
+ unsigned int i;
+ int ret;
+
if (IS_ERR_OR_NULL(mux))
return 0;
- return mux->set(mux, state);
+ for (i = 0; i < mux->num_mux_devs; i++) {
+ mux_dev = mux->mux_devs[i];
+
+ ret = mux_dev->set(mux_dev, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_mux_set);
static void typec_mux_release(struct device *dev)
{
- kfree(to_typec_mux(dev));
+ kfree(to_typec_mux_dev(dev));
}
const struct device_type typec_mux_dev_type = {
@@ -317,63 +445,66 @@ const struct device_type typec_mux_dev_type = {
* the pins on the connector need to be reconfigured. This function registers
* multiplexer switches routing the pins on the connector.
*/
-struct typec_mux *
+struct typec_mux_dev *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
{
- struct typec_mux *mux;
+ struct typec_mux_dev *mux_dev;
int ret;
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
- mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux)
+ mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
+ if (!mux_dev)
return ERR_PTR(-ENOMEM);
- mux->set = desc->set;
+ mux_dev->set = desc->set;
- device_initialize(&mux->dev);
- mux->dev.parent = parent;
- mux->dev.fwnode = desc->fwnode;
- mux->dev.class = &typec_mux_class;
- mux->dev.type = &typec_mux_dev_type;
- mux->dev.driver_data = desc->drvdata;
- dev_set_name(&mux->dev, "%s-mux",
- desc->name ? desc->name : dev_name(parent));
+ device_initialize(&mux_dev->dev);
+ mux_dev->dev.parent = parent;
+ mux_dev->dev.fwnode = desc->fwnode;
+ mux_dev->dev.class = &typec_mux_class;
+ mux_dev->dev.type = &typec_mux_dev_type;
+ mux_dev->dev.driver_data = desc->drvdata;
+ ret = dev_set_name(&mux_dev->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&mux_dev->dev);
+ return ERR_PTR(ret);
+ }
- ret = device_add(&mux->dev);
+ ret = device_add(&mux_dev->dev);
if (ret) {
dev_err(parent, "failed to register mux (%d)\n", ret);
- put_device(&mux->dev);
+ put_device(&mux_dev->dev);
return ERR_PTR(ret);
}
- return mux;
+ return mux_dev;
}
EXPORT_SYMBOL_GPL(typec_mux_register);
/**
* typec_mux_unregister - Unregister Multiplexer Switch
- * @mux: USB Type-C Connector Multiplexer/DeMultiplexer
+ * @mux_dev: USB Type-C Connector Multiplexer/DeMultiplexer
*
* Unregister mux that was registered with typec_mux_register().
*/
-void typec_mux_unregister(struct typec_mux *mux)
+void typec_mux_unregister(struct typec_mux_dev *mux_dev)
{
- if (!IS_ERR_OR_NULL(mux))
- device_unregister(&mux->dev);
+ if (!IS_ERR_OR_NULL(mux_dev))
+ device_unregister(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_unregister);
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data)
+void typec_mux_set_drvdata(struct typec_mux_dev *mux_dev, void *data)
{
- dev_set_drvdata(&mux->dev, data);
+ dev_set_drvdata(&mux_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_mux_set_drvdata);
-void *typec_mux_get_drvdata(struct typec_mux *mux)
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
{
- return dev_get_drvdata(&mux->dev);
+ return dev_get_drvdata(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
diff --git a/drivers/usb/typec/mux.h b/drivers/usb/typec/mux.h
index b1d6e837cb74..58f0f28b6dc8 100644
--- a/drivers/usb/typec/mux.h
+++ b/drivers/usb/typec/mux.h
@@ -5,23 +5,23 @@
#include <linux/usb/typec_mux.h>
-struct typec_switch {
+struct typec_switch_dev {
struct device dev;
typec_switch_set_fn_t set;
};
-struct typec_mux {
+struct typec_mux_dev {
struct device dev;
typec_mux_set_fn_t set;
};
-#define to_typec_switch(_dev_) container_of(_dev_, struct typec_switch, dev)
-#define to_typec_mux(_dev_) container_of(_dev_, struct typec_mux, dev)
+#define to_typec_switch_dev(_dev_) container_of(_dev_, struct typec_switch_dev, dev)
+#define to_typec_mux_dev(_dev_) container_of(_dev_, struct typec_mux_dev, dev)
extern const struct device_type typec_switch_dev_type;
extern const struct device_type typec_mux_dev_type;
-#define is_typec_switch(dev) ((dev)->type == &typec_switch_dev_type)
-#define is_typec_mux(dev) ((dev)->type == &typec_mux_dev_type)
+#define is_typec_switch_dev(dev) ((dev)->type == &typec_switch_dev_type)
+#define is_typec_mux_dev(dev) ((dev)->type == &typec_mux_dev_type)
 #endif /* __USB_TYPEC_MUX__ */
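With this split, provider drivers register and implement typec_switch_dev / typec_mux_dev, while consumers keep the opaque typec_switch / typec_mux handles, which can now fan out to several devices behind one connector. A minimal consumer-side sketch (connector_fwnode is an assumed variable, not from this patch):

	struct typec_switch *sw;

	sw = fwnode_typec_switch_get(connector_fwnode);
	if (IS_ERR(sw))
		return PTR_ERR(sw);	/* e.g. -EPROBE_DEFER */

	/* A NULL handle (no switch described) and a multi-device handle both work here. */
	typec_switch_set(sw, TYPEC_ORIENTATION_REVERSE);
	typec_switch_put(sw);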
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index edead555835e..5eb2c17d72c1 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -2,6 +2,16 @@
menu "USB Type-C Multiplexer/DeMultiplexer Switch support"
+config TYPEC_MUX_FSA4480
+ tristate "ON Semi FSA4480 Analog Audio Switch driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the ON Semiconductor FSA4480 Analog Audio Switch, which
+ provides support for muxing analog audio and sideband signals on a
+ common USB Type-C connector.
+ If compiled as a module, the module will be named fsa4480.
+
config TYPEC_MUX_PI3USB30532
tristate "Pericom PI3USB30532 Type-C cross switch driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 280a6f553115..e52a56c16bfb 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
new file mode 100644
index 000000000000..6184f5367190
--- /dev/null
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2022 Linaro Ltd.
+ * Copyright (C) 2018-2020 The Linux Foundation
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define FSA4480_SWITCH_ENABLE 0x04
+#define FSA4480_SWITCH_SELECT 0x05
+#define FSA4480_SWITCH_STATUS1 0x07
+#define FSA4480_SLOW_L 0x08
+#define FSA4480_SLOW_R 0x09
+#define FSA4480_SLOW_MIC 0x0a
+#define FSA4480_SLOW_SENSE 0x0b
+#define FSA4480_SLOW_GND 0x0c
+#define FSA4480_DELAY_L_R 0x0d
+#define FSA4480_DELAY_L_MIC 0x0e
+#define FSA4480_DELAY_L_SENSE 0x0f
+#define FSA4480_DELAY_L_AGND 0x10
+#define FSA4480_RESET 0x1e
+#define FSA4480_MAX_REGISTER 0x1f
+
+#define FSA4480_ENABLE_DEVICE BIT(7)
+#define FSA4480_ENABLE_SBU GENMASK(6, 5)
+#define FSA4480_ENABLE_USB GENMASK(4, 3)
+
+#define FSA4480_SEL_SBU_REVERSE GENMASK(6, 5)
+#define FSA4480_SEL_USB GENMASK(4, 3)
+
+struct fsa4480 {
+ struct i2c_client *client;
+
+ /* used to serialize concurrent change requests */
+ struct mutex lock;
+
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+
+ struct regmap *regmap;
+
+ u8 cur_enable;
+ u8 cur_select;
+};
+
+static const struct regmap_config fsa4480_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FSA4480_MAX_REGISTER,
+ /* Accesses only done under fsa4480->lock */
+ .disable_locking = true,
+};
+
+static int fsa4480_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
+ u8 new_sel;
+
+ mutex_lock(&fsa->lock);
+ new_sel = FSA4480_SEL_USB;
+ if (orientation == TYPEC_ORIENTATION_REVERSE)
+ new_sel |= FSA4480_SEL_SBU_REVERSE;
+
+ if (new_sel == fsa->cur_select)
+ goto out_unlock;
+
+ if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+ /* Disable SBU output while re-configuring the switch */
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE,
+ fsa->cur_enable & ~FSA4480_ENABLE_SBU);
+
+ /* 35us to allow the SBU switch to turn off */
+ usleep_range(35, 1000);
+ }
+
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, new_sel);
+ fsa->cur_select = new_sel;
+
+ if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+ /* 15us to allow the SBU switch to turn on again */
+ usleep_range(15, 1000);
+ }
+
+out_unlock:
+ mutex_unlock(&fsa->lock);
+
+ return 0;
+}
+
+static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct fsa4480 *fsa = typec_mux_get_drvdata(mux);
+ u8 new_enable;
+
+ mutex_lock(&fsa->lock);
+
+ new_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+ if (state->mode >= TYPEC_DP_STATE_A)
+ new_enable |= FSA4480_ENABLE_SBU;
+
+ if (new_enable == fsa->cur_enable)
+ goto out_unlock;
+
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, new_enable);
+ fsa->cur_enable = new_enable;
+
+ if (new_enable & FSA4480_ENABLE_SBU) {
+ /* 15us to allow the SBU switch to turn on */
+ usleep_range(15, 1000);
+ }
+
+out_unlock:
+ mutex_unlock(&fsa->lock);
+
+ return 0;
+}
+
+static int fsa4480_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct fsa4480 *fsa;
+
+ fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL);
+ if (!fsa)
+ return -ENOMEM;
+
+ fsa->client = client;
+ mutex_init(&fsa->lock);
+
+ fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config);
+ if (IS_ERR(fsa->regmap))
+ return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+
+ fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+ fsa->cur_select = FSA4480_SEL_USB;
+
+ /* set default settings */
+ regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_R, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_MIC, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_SENSE, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_GND, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_R, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, fsa->cur_select);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+ sw_desc.drvdata = fsa;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = fsa4480_switch_set;
+
+ fsa->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(fsa->sw))
+ return dev_err_probe(dev, PTR_ERR(fsa->sw), "failed to register typec switch\n");
+
+ mux_desc.drvdata = fsa;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = fsa4480_mux_set;
+
+ fsa->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(fsa->mux)) {
+ typec_switch_unregister(fsa->sw);
+ return dev_err_probe(dev, PTR_ERR(fsa->mux), "failed to register typec mux\n");
+ }
+
+ i2c_set_clientdata(client, fsa);
+ return 0;
+}
+
+static int fsa4480_remove(struct i2c_client *client)
+{
+ struct fsa4480 *fsa = i2c_get_clientdata(client);
+
+ typec_mux_unregister(fsa->mux);
+ typec_switch_unregister(fsa->sw);
+
+ return 0;
+}
+
+static const struct i2c_device_id fsa4480_table[] = {
+ { "fsa4480" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fsa4480_table);
+
+static const struct of_device_id fsa4480_of_table[] = {
+ { .compatible = "fcs,fsa4480" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fsa4480_of_table);
+
+static struct i2c_driver fsa4480_driver = {
+ .driver = {
+ .name = "fsa4480",
+ .of_match_table = fsa4480_of_table,
+ },
+ .probe_new = fsa4480_probe,
+ .remove = fsa4480_remove,
+ .id_table = fsa4480_table,
+};
+module_i2c_driver(fsa4480_driver);
+
+MODULE_DESCRIPTION("ON Semiconductor FSA4480 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 2cdd22130834..47b733f78fb0 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -121,8 +121,8 @@ struct pmc_usb_port {
int num;
u32 iom_status;
struct pmc_usb *pmc;
- struct typec_mux *typec_mux;
- struct typec_switch *typec_sw;
+ struct typec_mux_dev *typec_mux;
+ struct typec_switch_dev *typec_sw;
struct usb_role_switch *usb_sw;
enum typec_orientation orientation;
@@ -173,7 +173,7 @@ static int hsl_orientation(struct pmc_usb_port *port)
return port->orientation - 1;
}
-static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+static int pmc_usb_send_command(struct intel_scu_ipc_dev *ipc, u8 *msg, u32 len)
{
u8 response[4];
u8 status_res;
@@ -184,7 +184,7 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
* Status can be checked from the response message if the
* function intel_scu_ipc_dev_command succeeds.
*/
- ret = intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg,
+ ret = intel_scu_ipc_dev_command(ipc, PMC_USBC_CMD, 0, msg,
len, response, sizeof(response));
if (ret)
@@ -203,6 +203,23 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
return 0;
}
+static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+{
+ int retry_count = 3;
+ int ret;
+
+ /*
+ * If the PMC is busy, retry the command (up to three attempts in total)
+ */
+ while (retry_count--) {
+ ret = pmc_usb_send_command(port->pmc->ipc, msg, len);
+ if (ret != -EBUSY)
+ break;
+ }
+
+ return ret;
+}
+
static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
@@ -416,7 +433,7 @@ static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role)
}
static int
-pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct pmc_usb_port *port = typec_mux_get_drvdata(mux);
@@ -452,7 +469,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
return -EOPNOTSUPP;
}
-static int pmc_usb_set_orientation(struct typec_switch *sw,
+static int pmc_usb_set_orientation(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
struct pmc_usb_port *port = typec_switch_get_drvdata(sw);
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 7afe275b17d0..6ce9f282594e 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -23,8 +23,8 @@
struct pi3usb30532 {
struct i2c_client *client;
struct mutex lock; /* protects the cached conf register */
- struct typec_switch *sw;
- struct typec_mux *mux;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
u8 conf;
};
@@ -45,7 +45,7 @@ static int pi3usb30532_set_conf(struct pi3usb30532 *pi, u8 new_conf)
return 0;
}
-static int pi3usb30532_sw_set(struct typec_switch *sw,
+static int pi3usb30532_sw_set(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
struct pi3usb30532 *pi = typec_switch_get_drvdata(sw);
@@ -74,7 +74,7 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
}
static int
-pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pi3usb30532_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 72f9001b0792..96c55eaf3f80 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1708,8 +1708,8 @@ static int fusb302_probe(struct i2c_client *client,
*/
if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
chip->extcon = extcon_get_extcon_dev(name);
- if (!chip->extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(chip->extcon))
+ return PTR_ERR(chip->extcon);
}
chip->vbus = devm_regulator_get(chip->dev, "vbus");
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 16b4560216ba..dfbba5ae9487 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -93,6 +93,8 @@ struct tps6598x {
struct power_supply *psy;
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
+
+ u16 pwr_status;
};
static enum power_supply_property tps6598x_psy_props[] = {
@@ -230,17 +232,12 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
{
struct typec_partner_desc desc;
enum typec_pwr_opmode mode;
- u16 pwr_status;
int ret;
if (tps->partner)
return 0;
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
-
- mode = TPS_POWER_STATUS_PWROPMODE(pwr_status);
+ mode = TPS_POWER_STATUS_PWROPMODE(tps->pwr_status);
desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
@@ -455,6 +452,7 @@ static bool tps6598x_read_power_status(struct tps6598x *tps)
dev_err(tps->dev, "failed to read power status: %d\n", ret);
return false;
}
+ tps->pwr_status = pwr_status;
trace_tps6598x_power_status(pwr_status);
return true;
@@ -601,15 +599,8 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_psy_get_online(struct tps6598x *tps,
union power_supply_propval *val)
{
- int ret;
- u16 pwr_status;
-
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
-
- if (TPS_POWER_STATUS_CONNECTION(pwr_status) &&
- TPS_POWER_STATUS_SOURCESINK(pwr_status)) {
+ if (TPS_POWER_STATUS_CONNECTION(tps->pwr_status) &&
+ TPS_POWER_STATUS_SOURCESINK(tps->pwr_status)) {
val->intval = 1;
} else {
val->intval = 0;
@@ -622,15 +613,11 @@ static int tps6598x_psy_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct tps6598x *tps = power_supply_get_drvdata(psy);
- u16 pwr_status;
int ret = 0;
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
- if (TPS_POWER_STATUS_PWROPMODE(pwr_status) == TYPEC_PWR_MODE_PD)
+ if (TPS_POWER_STATUS_PWROPMODE(tps->pwr_status) == TYPEC_PWR_MODE_PD)
val->intval = POWER_SUPPLY_USB_TYPE_PD;
else
val->intval = POWER_SUPPLY_USB_TYPE_C;
@@ -837,6 +824,11 @@ static int tps6598x_probe(struct i2c_client *client)
fwnode_handle_put(fwnode);
if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to read power status: %d\n", ret);
+ goto err_role_put;
+ }
ret = tps6598x_connect(tps, status);
if (ret)
dev_err(&client->dev, "failed to register partner\n");
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index a6045aef0d04..cbd862f9f2a1 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1063,6 +1063,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num = index + 1;
con->ucsi = ucsi;
+ cap->fwnode = ucsi_find_fwnode(con);
+ con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
+ if (IS_ERR(con->usb_role_sw)) {
+ dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
+ con->num);
+ return PTR_ERR(con->usb_role_sw);
+ }
+
/* Delay other interactions with the con until registration is complete */
mutex_lock(&con->lock);
@@ -1098,7 +1106,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY)
*accessory = TYPEC_ACCESSORY_DEBUG;
- cap->fwnode = ucsi_find_fwnode(con);
cap->driver_data = con;
cap->ops = &ucsi_ops;
@@ -1156,13 +1163,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ucsi_port_psy_changed(con);
}
- con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
- if (IS_ERR(con->usb_role_sw)) {
- dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
- con->num);
- con->usb_role_sw = NULL;
- }
-
/* Only notify USB controller if partner supports USB data */
if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
u_role = USB_ROLE_NONE;
@@ -1196,6 +1196,32 @@ out_unlock:
return ret;
}
+static void ucsi_unregister_connectors(struct ucsi *ucsi)
+{
+ struct ucsi_connector *con;
+ int i;
+
+ if (!ucsi->connector)
+ return;
+
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ con = &ucsi->connector[i];
+
+ if (!con->wq)
+ break;
+
+ cancel_work_sync(&con->work);
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ }
+
+ kfree(ucsi->connector);
+ ucsi->connector = NULL;
+}
+
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1204,7 +1230,6 @@ out_unlock:
*/
static int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1235,7 +1260,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1259,15 +1284,7 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- for (con = ucsi->connector; con->port; con++) {
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- con->port = NULL;
- }
+ ucsi_unregister_connectors(ucsi);
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1278,12 +1295,20 @@ err:
static void ucsi_init_work(struct work_struct *work)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
int ret;
ret = ucsi_init(ucsi);
if (ret)
dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ if (ret == -EPROBE_DEFER) {
+ if (ucsi->work_count++ > UCSI_ROLE_SWITCH_WAIT_COUNT)
+ return;
+
+ queue_delayed_work(system_long_wq, &ucsi->work,
+ UCSI_ROLE_SWITCH_INTERVAL);
+ }
}
/**
@@ -1323,7 +1348,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init_work);
+ INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
ucsi->dev = dev;
ucsi->ops = ops;
@@ -1358,7 +1383,7 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
- queue_work(system_long_wq, &ucsi->work);
+ queue_delayed_work(system_long_wq, &ucsi->work, 0);
return 0;
}
@@ -1373,26 +1398,14 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
- int i;
/* Make sure that we are not in the middle of driver initialization */
- cancel_work_sync(&ucsi->work);
+ cancel_delayed_work_sync(&ucsi->work);
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
- if (ucsi->connector[i].wq)
- destroy_workqueue(ucsi->connector[i].wq);
- typec_unregister_port(ucsi->connector[i].port);
- }
-
- kfree(ucsi->connector);
+ ucsi_unregister_connectors(ucsi);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 280f1e1bda2c..8eb391e3e592 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -287,7 +287,11 @@ struct ucsi {
struct ucsi_capability cap;
struct ucsi_connector *connector;
- struct work_struct work;
+ struct delayed_work work;
+ int work_count;
+#define UCSI_ROLE_SWITCH_RETRY_PER_HZ 10
+#define UCSI_ROLE_SWITCH_INTERVAL (HZ / UCSI_ROLE_SWITCH_RETRY_PER_HZ)
+#define UCSI_ROLE_SWITCH_WAIT_COUNT (10 * UCSI_ROLE_SWITCH_RETRY_PER_HZ)
/* PPM Communication lock */
struct mutex ppm_lock;
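With UCSI_ROLE_SWITCH_RETRY_PER_HZ at 10, the retry interval works out to HZ / 10 jiffies (about 100 ms) and the wait count to 10 * 10 = 100 attempts, so ucsi_init_work() keeps re-queueing itself for roughly ten seconds while the USB role switch has not probed yet before it finally gives up.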
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index d8d3892e5a69..3c6d452e3bf4 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev)
err_port:
dev_set_drvdata(&udev->dev, NULL);
- usb_put_dev(udev);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
@@ -408,6 +407,7 @@ call_put_busid_priv:
put_busid_priv(busid_priv);
sdev_free:
+ usb_put_dev(udev);
stub_device_free(sdev);
return rc;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 325c22008e53..5dd41e8215e0 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb)
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
+ usb_lock_device(sdev->udev);
err = usb_set_configuration(sdev->udev, config);
+ usb_unlock_device(sdev->udev);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index f480d54f308c..5a09a09cca70 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -470,7 +470,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
- dev, &eni_vdpa_ops, NULL, false);
+ dev, &eni_vdpa_ops, 1, 1, NULL, false);
if (IS_ERR(eni_vdpa)) {
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
return PTR_ERR(eni_vdpa);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 4366320fb68d..0a5670729412 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -290,16 +290,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
struct ifcvf_hw *vf = &adapter->vf;
int config_vector, ret;
- if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
- return 0;
-
if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
- /* vector 0 ~ vf->nr_vring for vqs, num vf->nr_vring vector for config interrupt */
config_vector = vf->nr_vring;
-
- if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+ else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
/* vector 0 for vqs and 1 for config interrupt */
config_vector = 1;
+ else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+ /* re-use the vqs vector */
+ return 0;
+ else
+ return -EINVAL;
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
@@ -626,6 +626,11 @@ static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
return vf->config_size;
}
+static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
@@ -704,6 +709,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_vq_group = ifcvf_vdpa_get_vq_group,
.get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
@@ -758,14 +764,13 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
pdev = ifcvf_mgmt_dev->pdev;
dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, name, false);
+ dev, &ifc_vdpa_ops, 1, 1, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
ifcvf_mgmt_dev->adapter = adapter;
- pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
vf->dev_type = get_dev_type(pdev);
@@ -880,6 +885,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
+
return 0;
err:
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index daaf7b503677..44104093163b 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -61,6 +61,8 @@ struct mlx5_control_vq {
struct vringh_kiov riov;
struct vringh_kiov wiov;
unsigned short head;
+ unsigned int received_desc;
+ unsigned int completed_desc;
};
struct mlx5_vdpa_wq_ent {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e0de44000d92..b7a955479156 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -48,6 +48,8 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+#define MLX5V_UNTAGGED 0x1000
+
struct mlx5_vdpa_net_resources {
u32 tisn;
u32 tdn;
@@ -119,6 +121,7 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vdpa_umem umem2;
struct mlx5_vdpa_umem umem3;
+ u32 counter_set_id;
bool initialized;
int index;
u32 virtq_id;
@@ -143,6 +146,8 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
return idx <= mvdev->max_idx;
}
+#define MLX5V_MACVLAN_SIZE 256
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
@@ -154,17 +159,22 @@ struct mlx5_vdpa_net {
* since memory map might change and we need to destroy and create
* resources while driver in operational.
*/
- struct mutex reslock;
+ struct rw_semaphore reslock;
struct mlx5_flow_table *rxft;
- struct mlx5_fc *rx_counter;
- struct mlx5_flow_handle *rx_rule_ucast;
- struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
+ struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+};
+
+struct macvlan_node {
+ struct hlist_node hlist;
+ struct mlx5_flow_handle *ucast_rule;
+ struct mlx5_flow_handle *mcast_rule;
+ u64 macvlan;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -818,6 +828,12 @@ static u16 get_features_12_3(u64 features)
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
+static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
+{
+ return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
+ BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+}
+
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -872,6 +888,8 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ if (counters_supported(&ndev->mvdev))
+ MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1135,6 +1153,47 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
return err;
}
+static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return 0;
+
+ cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
+
+ if (!counters_supported(&ndev->mvdev))
+ return;
+
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+ mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
+}
+
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u16 idx = mvq->index;
@@ -1162,6 +1221,10 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (err)
goto err_connect;
+ err = counter_set_alloc(ndev, mvq);
+ if (err)
+ goto err_counter;
+
err = create_virtqueue(ndev, mvq);
if (err)
goto err_connect;
@@ -1179,6 +1242,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
return 0;
err_connect:
+ counter_set_dealloc(ndev, mvq);
+err_counter:
qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
qp_destroy(ndev, &mvq->fwqp);
@@ -1223,6 +1288,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
suspend_vq(ndev, mvq);
destroy_virtqueue(ndev, mvq);
+ counter_set_dealloc(ndev, mvq);
qp_destroy(ndev, &mvq->vqqp);
qp_destroy(ndev, &mvq->fwqp);
cq_destroy(ndev, mvq->index);
@@ -1347,12 +1413,17 @@ static void destroy_tir(struct mlx5_vdpa_net *ndev)
mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
}
-static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+#define MAX_STEERING_ENT 0x8000
+#define MAX_STEERING_GROUPS 2
+
+static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
+ u16 vid, bool tagged,
+ struct mlx5_flow_handle **ucast,
+ struct mlx5_flow_handle **mcast)
{
- struct mlx5_flow_destination dest[2] = {};
- struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
void *headers_c;
void *headers_v;
@@ -1365,85 +1436,178 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
return -ENOMEM;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- ft_attr.max_fte = 2;
- ft_attr.autogroup.max_num_groups = 2;
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+ dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+ memset(dmac_c, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac_v, mac);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ if (tagged) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
+ }
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = ndev->res.tirn;
+ rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
- ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
- if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
- err = -EOPNOTSUPP;
- goto err_ns;
+ *ucast = rule;
+
+ memset(dmac_c, 0, ETH_ALEN);
+ memset(dmac_v, 0, ETH_ALEN);
+ dmac_c[0] = 1;
+ dmac_v[0] = 1;
+ rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_mcast;
}
- ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(ndev->rxft)) {
- err = PTR_ERR(ndev->rxft);
- goto err_ns;
+ *mcast = rule;
+ return 0;
+
+err_mcast:
+ mlx5_del_flow_rules(*ucast);
+ return err;
+}
+
+static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
+ struct mlx5_flow_handle *ucast,
+ struct mlx5_flow_handle *mcast)
+{
+ mlx5_del_flow_rules(ucast);
+ mlx5_del_flow_rules(mcast);
+}
+
+static u64 search_val(u8 *mac, u16 vlan, bool tagged)
+{
+ u64 val;
+
+ if (!tagged)
+ vlan = MLX5V_UNTAGGED;
+
+ val = (u64)vlan << 48 |
+ (u64)mac[0] << 40 |
+ (u64)mac[1] << 32 |
+ (u64)mac[2] << 24 |
+ (u64)mac[3] << 16 |
+ (u64)mac[4] << 8 |
+ (u64)mac[5];
+
+ return val;
+}
+
+static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
+{
+ struct macvlan_node *pos;
+ u32 idx;
+
+ idx = hash_64(value, 8); /* 8 hash bits: MLX5V_MACVLAN_SIZE is 256 buckets */
+ hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
+ if (pos->macvlan == value)
+ return pos;
}
+ return NULL;
+}
+
+static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+{
+ struct macvlan_node *ptr;
+ u64 val;
+ u32 idx;
+ int err;
+
+ val = search_val(mac, vlan, tagged);
+ if (mac_vlan_lookup(ndev, val))
+ return -EEXIST;
+
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, vlan, tagged,
+ &ptr->ucast_rule, &ptr->mcast_rule);
+ if (err)
+ goto err_add;
+
+ ptr->macvlan = val;
+ idx = hash_64(val, 8);
+ hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
+ return 0;
+
+err_add:
+ kfree(ptr);
+ return err;
+}
+
+static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+{
+ struct macvlan_node *ptr;
+
+ ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
+ if (!ptr)
+ return;
+
+ hlist_del(&ptr->hlist);
+ mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule);
+ kfree(ptr);
+}
+
+static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
+{
+ struct macvlan_node *pos;
+ struct hlist_node *n;
+ int i;
- ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
- if (IS_ERR(ndev->rx_counter)) {
- err = PTR_ERR(ndev->rx_counter);
- goto err_fc;
+ for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
+ hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
+ hlist_del(&pos->hlist);
+ mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule);
+ kfree(pos);
+ }
}
+}
- headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
- dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
- dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, ndev->config.mac);
+static int setup_steering(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest[0].tir_num = ndev->res.tirn;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
- ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+ ft_attr.max_fte = MAX_STEERING_ENT;
+ ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
- if (IS_ERR(ndev->rx_rule_ucast)) {
- err = PTR_ERR(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
- goto err_rule_ucast;
+ ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ return -EOPNOTSUPP;
}
- memset(dmac_c, 0, ETH_ALEN);
- memset(dmac_v, 0, ETH_ALEN);
- dmac_c[0] = 1;
- dmac_v[0] = 1;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
- if (IS_ERR(ndev->rx_rule_mcast)) {
- err = PTR_ERR(ndev->rx_rule_mcast);
- ndev->rx_rule_mcast = NULL;
- goto err_rule_mcast;
+ ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ndev->rxft)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ return PTR_ERR(ndev->rxft);
}
- kvfree(spec);
+ err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
+ if (err)
+ goto err_add;
+
return 0;
-err_rule_mcast:
- mlx5_del_flow_rules(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
-err_rule_ucast:
- mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
-err_fc:
+err_add:
mlx5_destroy_flow_table(ndev->rxft);
-err_ns:
- kvfree(spec);
return err;
}
-static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+static void teardown_steering(struct mlx5_vdpa_net *ndev)
{
- if (!ndev->rx_rule_ucast)
- return;
-
- mlx5_del_flow_rules(ndev->rx_rule_mcast);
- ndev->rx_rule_mcast = NULL;
- mlx5_del_flow_rules(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
- mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+ clear_mac_vlan_table(ndev);
mlx5_destroy_flow_table(ndev->rxft);
}
@@ -1494,9 +1658,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
/* Need to recreate the flow table entry, so that the packet can be forwarded back
*/
- remove_fwd_to_tir(ndev);
+ mac_vlan_del(ndev, ndev->config.mac, 0, false);
- if (add_fwd_to_tir(ndev)) {
+ if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
/* Although it hardly runs here, we still need to double check */
@@ -1520,7 +1684,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
memcpy(ndev->config.mac, mac_back, ETH_ALEN);
- if (add_fwd_to_tir(ndev))
+ if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
break;
@@ -1622,6 +1786,42 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
return status;
}
+static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ __virtio16 vlan;
+ size_t read;
+ u16 id;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_VLAN_ADD:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+ if (read != sizeof(vlan))
+ break;
+
+ id = mlx5vdpa16_to_cpu(mvdev, vlan);
+ if (mac_vlan_add(ndev, ndev->config.mac, id, true))
+ break;
+
+ status = VIRTIO_NET_OK;
+ break;
+ case VIRTIO_NET_CTRL_VLAN_DEL:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+ if (read != sizeof(vlan))
+ break;
+
+ id = mlx5vdpa16_to_cpu(mvdev, vlan);
+ mac_vlan_del(ndev, ndev->config.mac, id, true);
+ break;
+ default:
+ break;
+}
+
+return status;
+}
+
static void mlx5_cvq_kick_handler(struct work_struct *work)
{
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1638,7 +1838,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
@@ -1659,6 +1859,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (read != sizeof(ctrl))
break;
+ cvq->received_desc++;
switch (ctrl.class) {
case VIRTIO_NET_CTRL_MAC:
status = handle_ctrl_mac(mvdev, ctrl.cmd);
@@ -1666,7 +1867,9 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
case VIRTIO_NET_CTRL_MQ:
status = handle_ctrl_mq(mvdev, ctrl.cmd);
break;
-
+ case VIRTIO_NET_CTRL_VLAN:
+ status = handle_ctrl_vlan(mvdev, ctrl.cmd);
+ break;
default:
break;
}
@@ -1682,12 +1885,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (vringh_need_notify_iotlb(&cvq->vring))
vringh_notify(&cvq->vring);
+ cvq->completed_desc++;
queue_work(mvdev->wq, &wqent->work);
break;
}
out:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1888,6 +2092,11 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
@@ -1925,6 +2134,7 @@ static u64 get_supported_features(struct mlx5_core_dev *mdev)
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
return mlx_vdpa_features;
}
@@ -2185,7 +2395,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (ndev->setup) {
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
@@ -2210,9 +2420,9 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto err_tir;
}
- err = add_fwd_to_tir(ndev);
+ err = setup_steering(ndev);
if (err) {
- mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
+ mlx5_vdpa_warn(mvdev, "setup_steering\n");
goto err_fwd;
}
ndev->setup = true;
@@ -2233,12 +2443,12 @@ out:
static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (!ndev->setup)
return;
- remove_fwd_to_tir(ndev);
+ teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
teardown_virtqueues(ndev);
@@ -2263,7 +2473,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
print_status(mvdev, status, true);
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
@@ -2279,14 +2489,14 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
}
ndev->mvdev.status = status;
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return;
err_setup:
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2297,12 +2507,14 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
teardown_driver(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->cur_num_vqs = 0;
+ ndev->mvdev.cvq.received_desc = 0;
+ ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
@@ -2310,7 +2522,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return 0;
}
@@ -2343,14 +2555,15 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
@@ -2362,7 +2575,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
err = mlx5_vdpa_change_map(mvdev, iotlb);
err:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return err;
}
@@ -2381,7 +2594,6 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
mlx5_vdpa_free_resources(&ndev->mvdev);
- mutex_destroy(&ndev->reslock);
kfree(ndev->event_cbs);
kfree(ndev->vqs);
}
@@ -2422,6 +2634,93 @@ static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
return mvdev->actual_features;
}
+static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ u64 *received_desc, u64 *completed_desc)
+{
+ u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ void *ctx;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return -EOPNOTSUPP;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return -EAGAIN;
+
+ cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
+ *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
+ *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
+ return 0;
+}
+
+static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_control_vq *cvq;
+ u64 received_desc;
+ u64 completed_desc;
+ int err = 0;
+
+ down_read(&ndev->reslock);
+ if (!is_index_valid(mvdev, idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (idx == ctrl_vq_idx(mvdev)) {
+ cvq = &mvdev->cvq;
+ received_desc = cvq->received_desc;
+ completed_desc = cvq->completed_desc;
+ goto out;
+ }
+
+ mvq = &ndev->vqs[idx];
+ err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
+ goto out_err;
+ }
+
+out:
+ err = -EMSGSIZE;
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ err = 0;
+out_err:
+ up_read(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2431,9 +2730,11 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_ready = mlx5_vdpa_get_vq_ready,
.set_vq_state = mlx5_vdpa_set_vq_state,
.get_vq_state = mlx5_vdpa_get_vq_state,
+ .get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
+ .get_vq_group = mlx5_vdpa_get_vq_group,
.get_device_features = mlx5_vdpa_get_device_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
@@ -2669,7 +2970,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name, false);
+ 1, 1, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2686,18 +2987,18 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
init_mvqs(ndev);
- mutex_init(&ndev->reslock);
+ init_rwsem(&ndev->reslock);
config = &ndev->config;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
err = config_func_mtu(mdev, add_config->net.mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
err = query_mtu(mdev, &mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
@@ -2711,14 +3012,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
} else {
err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
if (!is_zero_ether_addr(config->mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
@@ -2768,8 +3069,6 @@ err_res:
err_mpfs:
if (!is_zero_ether_addr(config->mac))
mlx5_mpfs_del_mac(pfmdev, config->mac);
-err_mtu:
- mutex_destroy(&ndev->reslock);
err_alloc:
put_device(&mvdev->vdev.dev);
return err;
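
For reference, the steering rework in mlx5_vnet.c above keys each ucast/mcast rule pair on a single 64-bit value built by search_val(): the VLAN id occupies bits 48-63 (with MLX5V_UNTAGGED, 0x1000, standing in for untagged traffic since it lies outside the 12-bit VLAN range) and the MAC fills bits 0-47. A stand-alone sketch of that packing; the helper name is illustrative, not the kernel function.

#include <stdint.h>
#include <stdio.h>

#define MLX5V_UNTAGGED 0x1000 /* sentinel vid for untagged traffic, outside the 12-bit range */

/* Mirrors the kernel's search_val(): vid in bits 48..63, MAC in bits 0..47. */
static uint64_t macvlan_key(const uint8_t mac[6], uint16_t vid, int tagged)
{
	if (!tagged)
		vid = MLX5V_UNTAGGED;

	return (uint64_t)vid << 48 |
	       (uint64_t)mac[0] << 40 | (uint64_t)mac[1] << 32 |
	       (uint64_t)mac[2] << 24 | (uint64_t)mac[3] << 16 |
	       (uint64_t)mac[4] << 8  | (uint64_t)mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	printf("untagged key: 0x%016llx\n", (unsigned long long)macvlan_key(mac, 0, 0));
	printf("vid 100 key:  0x%016llx\n", (unsigned long long)macvlan_key(mac, 100, 1));
	return 0;
}
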
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 2b75c00b1005..ebf2f363fbe7 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -18,14 +18,14 @@
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
-static DEFINE_MUTEX(vdpa_dev_mutex);
+static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_status(vdev, status);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
@@ -77,32 +77,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
- const char *driver_override, *old;
- char *cp;
+ int ret;
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = vdev->driver_override;
- if (strlen(driver_override)) {
- vdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- vdev->driver_override = NULL;
- }
- device_unlock(dev);
-
- kfree(old);
+ ret = driver_set_override(dev, &vdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -148,7 +127,6 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index);
- mutex_destroy(&vdev->cf_mutex);
kfree(vdev->driver_override);
kfree(vdev);
}
@@ -159,6 +137,8 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @ngroups: number of groups supported by this device
+ * @nas: number of address spaces supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
* @use_va: indicate whether virtual address must be used by this device
@@ -171,6 +151,7 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va)
{
@@ -203,6 +184,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->config = config;
vdev->features_valid = false;
vdev->use_va = use_va;
+ vdev->ngroups = ngroups;
+ vdev->nas = nas;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
@@ -211,7 +194,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (err)
goto err_name;
- mutex_init(&vdev->cf_mutex);
+ init_rwsem(&vdev->cf_lock);
device_initialize(&vdev->dev);
return vdev;
@@ -238,7 +221,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
vdev->nvqs = nvqs;
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
if (dev) {
put_device(dev);
@@ -278,9 +261,9 @@ int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
err = __vdpa_register_device(vdev, nvqs);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
@@ -293,7 +276,7 @@ EXPORT_SYMBOL_GPL(vdpa_register_device);
*/
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
WARN_ON(!vdev->mdev);
device_unregister(&vdev->dev);
}
@@ -305,9 +288,9 @@ EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
*/
void vdpa_unregister_device(struct vdpa_device *vdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
device_unregister(&vdev->dev);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
@@ -352,9 +335,9 @@ int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
return -EINVAL;
INIT_LIST_HEAD(&mdev->list);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_add_tail(&mdev->list, &mdev_head);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
@@ -371,14 +354,14 @@ static int vdpa_match_remove(struct device *dev, void *data)
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_del(&mdev->list);
/* Filter out all the entries belong to this management device and delete it. */
bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
@@ -407,9 +390,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len);
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
@@ -423,9 +406,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int length)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
@@ -532,17 +515,17 @@ static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *i
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
err = PTR_ERR(mdev);
goto out;
}
err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
goto out;
err = genlmsg_reply(msg, info);
@@ -561,7 +544,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
int idx = 0;
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
list_for_each_entry(mdev, &mdev_head, list) {
if (idx < start) {
idx++;
@@ -574,7 +557,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
idx++;
}
out:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = idx;
return msg->len;
}
@@ -627,7 +610,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
@@ -643,7 +626,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
err = mdev->ops->dev_add(mdev, name, &config);
err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -659,7 +642,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -677,7 +660,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -743,7 +726,7 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -756,14 +739,19 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
goto mdev_err;
}
err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
- if (!err)
- err = genlmsg_reply(msg, info);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+ put_device(dev);
+ up_read(&vdpa_dev_lock);
+ return err;
+
mdev_err:
put_device(dev);
err:
- mutex_unlock(&vdpa_dev_mutex);
- if (err)
- nlmsg_free(msg);
+ up_read(&vdpa_dev_lock);
+ nlmsg_free(msg);
return err;
}
@@ -804,9 +792,9 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
@@ -861,7 +849,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
u8 status;
int err;
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
status = vdev->config->get_status(vdev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
@@ -898,14 +886,116 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
out:
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ struct virtio_net_config config = {};
+ u64 features;
+ u16 max_vqp;
+ u8 status;
+ int err;
+
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
+ return -EAGAIN;
+ }
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
+
+ max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
+ return -EMSGSIZE;
+
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
+ features, VDPA_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
+ return -EMSGSIZE;
+
+ err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ int err;
+
+ down_read(&vdev->cf_lock);
+ if (!vdev->config->get_vendor_vq_stats) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = vdpa_fill_stats_rec(vdev, msg, info, index);
+out:
+ up_read(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ u32 device_id;
+ void *hdr;
+ int err;
+ u32 portid = info->snd_portid;
+ u32 seq = info->snd_seq;
+ u32 flags = 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+ VDPA_CMD_DEV_VSTATS_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ device_id = vdev->config->get_device_id(vdev);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ switch (device_id) {
+ case VIRTIO_ID_NET:
+ if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+ NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
+ err = -ERANGE;
+ break;
+ }
+
+ err = vendor_stats_fill(vdev, msg, info, index);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ genlmsg_end(msg, hdr);
+
+ return err;
+
+undo_msg:
+ genlmsg_cancel(msg, hdr);
return err;
}
@@ -924,7 +1014,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -945,7 +1035,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
nlmsg_free(msg);
return err;
@@ -983,13 +1073,67 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
+static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct sk_buff *msg;
+ const char *devname;
+ struct device *dev;
+ u32 index;
+ int err;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
+ return -EINVAL;
+
+ devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
+ down_read(&vdpa_dev_lock);
+ dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+
+ put_device(dev);
+ up_read(&vdpa_dev_lock);
+
+ return err;
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ nlmsg_free(msg);
+ up_read(&vdpa_dev_lock);
+ return err;
+}
+
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
@@ -1030,6 +1174,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_config_get_doit,
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
},
+ {
+ .cmd = VDPA_CMD_DEV_VSTATS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_stats_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index ddbe142af09a..0f2865899647 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -96,11 +96,17 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
int i;
- for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ spin_lock(&vdpasim->iommu_lock);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
+ &vdpasim->iommu_lock);
+ }
+
+ for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ vhost_iotlb_reset(&vdpasim->iommu[i]);
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_reset(vdpasim->iommu);
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -145,7 +151,7 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
dma_addr = iova_dma_addr(&vdpasim->iova, iova);
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+ ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
(u64)dma_addr + size - 1, (u64)paddr, perm);
spin_unlock(&vdpasim->iommu_lock);
@@ -161,7 +167,7 @@ static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
size_t size)
{
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+ vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
(u64)dma_addr + size - 1);
spin_unlock(&vdpasim->iommu_lock);
@@ -251,6 +257,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+ dev_attr->ngroups, dev_attr->nas,
dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
@@ -278,16 +285,20 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
if (!vdpasim->vqs)
goto err_iommu;
- vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
+ vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
+ sizeof(*vdpasim->iommu), GFP_KERNEL);
if (!vdpasim->iommu)
goto err_iommu;
+ for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
goto err_iommu;
for (i = 0; i < dev_attr->nvqs; i++)
- vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
&vdpasim->iommu_lock);
ret = iova_cache_get();
@@ -353,11 +364,14 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ bool old_ready;
spin_lock(&vdpasim->lock);
+ old_ready = vq->ready;
vq->ready = ready;
- if (vq->ready)
+ if (vq->ready && !old_ready) {
vdpasim_queue_ready(vdpasim, idx);
+ }
spin_unlock(&vdpasim->lock);
}
@@ -399,6 +413,15 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
+static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ /* RX and TX belong to group 0, CVQ belongs to group 1 */
+ if (idx == 2)
+ return 1;
+ else
+ return 0;
+}
+
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -534,20 +557,53 @@ static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
return range;
}
-static int vdpasim_set_map(struct vdpa_device *vdpa,
+static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
+ unsigned int asid)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vhost_iotlb *iommu;
+ int i;
+
+ if (group > vdpasim->dev_attr.ngroups)
+ return -EINVAL;
+
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
+ iommu = &vdpasim->iommu[asid];
+
+ spin_lock(&vdpasim->lock);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ if (vdpasim_get_vq_group(vdpa, i) == group)
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
+ &vdpasim->iommu_lock);
+
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
+static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iommu;
u64 start = 0ULL, last = 0ULL - 1;
int ret;
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_reset(vdpasim->iommu);
+
+ iommu = &vdpasim->iommu[asid];
+ vhost_iotlb_reset(iommu);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
- ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
+ ret = vhost_iotlb_add_range(iommu, map->start,
map->last, map->addr, map->perm);
if (ret)
goto err;
@@ -556,31 +612,39 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
return 0;
err:
- vhost_iotlb_reset(vdpasim->iommu);
+ vhost_iotlb_reset(iommu);
spin_unlock(&vdpasim->iommu_lock);
return ret;
}
-static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
+static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
+ u64 iova, u64 size,
u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
- pa, perm, opaque);
+ ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
+ iova + size - 1, pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
}
-static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
+ u64 iova, u64 size)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+ vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
spin_unlock(&vdpasim->iommu_lock);
return 0;
@@ -604,8 +668,7 @@ static void vdpasim_free(struct vdpa_device *vdpa)
}
kvfree(vdpasim->buffer);
- if (vdpasim->iommu)
- vhost_iotlb_free(vdpasim->iommu);
+ vhost_iotlb_free(vdpasim->iommu);
kfree(vdpasim->vqs);
kfree(vdpasim->config);
}
@@ -620,6 +683,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
@@ -635,6 +699,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
.get_iova_range = vdpasim_get_iova_range,
+ .set_group_asid = vdpasim_set_group_asid,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
@@ -650,6 +715,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
@@ -665,6 +731,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
.get_iova_range = vdpasim_get_iova_range,
+ .set_group_asid = vdpasim_set_group_asid,
.set_map = vdpasim_set_map,
.free = vdpasim_free,
};
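
The simulator now exposes two virtqueue groups (data queues in group 0, the control queue in group 1) and two address spaces; vdpasim_set_group_asid() simply re-points every vring whose group matches onto the iotlb selected by the asid. A compact user-space model of that mapping follows; the names are illustrative, not the vdpa API.

#include <stdio.h>

#define NVQS    3 /* rx, tx, cvq */
#define NGROUPS 2

/* Group assignment mirroring vdpasim_get_vq_group(): the cvq (index 2) sits in group 1. */
static unsigned int vq_group(unsigned int idx)
{
	return idx == 2 ? 1 : 0;
}

int main(void)
{
	unsigned int group_to_asid[NGROUPS] = { 0, 0 }; /* both groups start on address space 0 */
	unsigned int i;

	/* e.g. a backend moves the control-queue group onto its own address space */
	group_to_asid[1] = 1;

	for (i = 0; i < NVQS; i++)
		printf("vq %u: group %u -> asid %u\n",
		       i, vq_group(i), group_to_asid[vq_group(i)]);
	return 0;
}
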
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index cd58e888bcf3..622782e92239 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -41,6 +41,8 @@ struct vdpasim_dev_attr {
size_t buffer_size;
int nvqs;
u32 id;
+ u32 ngroups;
+ u32 nas;
work_func_t work_fn;
void (*get_config)(struct vdpasim *vdpasim, void *config);
@@ -63,6 +65,7 @@ struct vdpasim {
u32 status;
u32 generation;
u64 features;
+ u32 groups;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index d5324f6fd8c7..5125976a4df8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -26,9 +26,122 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_NET_FEATURES (VDPASIM_FEATURES | \
- (1ULL << VIRTIO_NET_F_MAC))
+ (1ULL << VIRTIO_NET_F_MAC) | \
+ (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+ (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR))
-#define VDPASIM_NET_VQ_NUM 2
+/* 3 virtqueues, 2 address spaces, 2 virtqueue groups */
+#define VDPASIM_NET_VQ_NUM 3
+#define VDPASIM_NET_AS_NUM 2
+#define VDPASIM_NET_GROUP_NUM 2
+
+static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
+{
+ /* Make sure data is written before advancing the index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&vq->vring, vq->head, len);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&vq->vring) > 0)
+ vringh_notify(&vq->vring);
+ local_bh_enable();
+}
+
+static bool receive_filter(struct vdpasim *vdpasim, size_t len)
+{
+ bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
+ size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
+ sizeof(struct virtio_net_hdr);
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ if (len < ETH_ALEN + hdr_len)
+ return false;
+
+ if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
+ return true;
+
+ return false;
+}
+
+static virtio_net_ctrl_ack vdpasim_handle_ctrl_mac(struct vdpasim *vdpasim,
+ u8 cmd)
+{
+ struct virtio_net_config *vio_config = vdpasim->config;
+ struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ size_t read;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
+ vio_config->mac, ETH_ALEN);
+ if (read == ETH_ALEN)
+ status = VIRTIO_NET_OK;
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static void vdpasim_handle_cvq(struct vdpasim *vdpasim)
+{
+ struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct virtio_net_ctrl_hdr ctrl;
+ size_t read, write;
+ int err;
+
+ if (!(vdpasim->features & (1ULL << VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ if (!cvq->ready)
+ return;
+
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
+ &cvq->out_iov,
+ &cvq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,
+ sizeof(ctrl));
+ if (read != sizeof(ctrl))
+ break;
+
+ switch (ctrl.class) {
+ case VIRTIO_NET_CTRL_MAC:
+ status = vdpasim_handle_ctrl_mac(vdpasim, ctrl.cmd);
+ break;
+ default:
+ break;
+ }
+
+ /* Make sure data is written before advancing the index */
+ smp_wmb();
+
+ write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
+ &status, sizeof(status));
+ vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+ vringh_kiov_cleanup(&cvq->in_iov);
+ vringh_kiov_cleanup(&cvq->out_iov);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (cvq->cb)
+ cvq->cb(cvq->private);
+ local_bh_enable();
+ }
+}
static void vdpasim_net_work(struct work_struct *work)
{
@@ -36,7 +149,6 @@ static void vdpasim_net_work(struct work_struct *work)
struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
ssize_t read, write;
- size_t total_write;
int pkts = 0;
int err;
@@ -45,53 +157,40 @@ static void vdpasim_net_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ vdpasim_handle_cvq(vdpasim);
+
if (!txq->ready || !rxq->ready)
goto out;
while (true) {
- total_write = 0;
err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
&txq->head, GFP_ATOMIC);
if (err <= 0)
break;
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+
+ if (!receive_filter(vdpasim, read)) {
+ vdpasim_net_complete(txq, 0);
+ continue;
+ }
+
err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
&rxq->head, GFP_ATOMIC);
if (err <= 0) {
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vdpasim_net_complete(txq, 0);
break;
}
- while (true) {
- read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
- vdpasim->buffer,
- PAGE_SIZE);
- if (read <= 0)
- break;
-
- write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
- vdpasim->buffer, read);
- if (write <= 0)
- break;
-
- total_write += write;
- }
-
- /* Make sure data is wrote before advancing index */
- smp_wmb();
-
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
-
- /* Make sure used is visible before rasing the interrupt. */
- smp_wmb();
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
- local_bh_disable();
- if (vringh_need_notify_iotlb(&txq->vring) > 0)
- vringh_notify(&txq->vring);
- if (vringh_need_notify_iotlb(&rxq->vring) > 0)
- vringh_notify(&rxq->vring);
- local_bh_enable();
+ vdpasim_net_complete(txq, 0);
+ vdpasim_net_complete(rxq, write);
if (++pkts > 4) {
schedule_work(&vdpasim->work);
@@ -145,6 +244,8 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_NET;
dev_attr.supported_features = VDPASIM_NET_FEATURES;
dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_NET_GROUP_NUM;
+ dev_attr.nas = VDPASIM_NET_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_net_config);
dev_attr.get_config = vdpasim_net_get_config;
dev_attr.work_fn = vdpasim_net_work;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index f85d1a08ed87..d503848b3b6e 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -693,6 +693,7 @@ static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
}
static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+ unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -1495,7 +1496,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
return -EEXIST;
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
- &vduse_vdpa_config_ops, name, true);
+ &vduse_vdpa_config_ops, 1, 1, name, true);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index cce101e6a940..04522077735b 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -32,7 +32,7 @@ struct vp_vring {
struct vp_vdpa {
struct vdpa_device vdpa;
- struct virtio_pci_modern_device mdev;
+ struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
char msix_name[VP_VDPA_NAME_SIZE];
@@ -41,6 +41,12 @@ struct vp_vdpa {
int vectors;
};
+struct vp_vdpa_mgmtdev {
+ struct vdpa_mgmt_dev mgtdev;
+ struct virtio_pci_modern_device *mdev;
+ struct vp_vdpa *vp_vdpa;
+};
+
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
return container_of(vdpa, struct vp_vdpa, vdpa);
@@ -50,7 +56,12 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- return &vp_vdpa->mdev;
+ return vp_vdpa->mdev;
+}
+
+static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
+{
+ return vp_vdpa->mdev;
}
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
@@ -96,7 +107,7 @@ static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i;
@@ -143,7 +154,7 @@ static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i, ret, irq;
int queues = vp_vdpa->queues;
@@ -198,7 +209,7 @@ err:
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
@@ -212,7 +223,7 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
vp_modern_set_status(mdev, 0);
@@ -372,7 +383,7 @@ static void vp_vdpa_get_config(struct vdpa_device *vdpa,
void *buf, unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 old, new;
u8 *p;
int i;
@@ -392,7 +403,7 @@ static void vp_vdpa_set_config(struct vdpa_device *vdpa,
unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
const u8 *p = buf;
int i;
@@ -412,7 +423,7 @@ static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct vdpa_notification_area notify;
notify.addr = vp_vdpa->vring[qid].notify_pa;
@@ -454,38 +465,31 @@ static void vp_vdpa_free_irq_vectors(void *data)
pci_free_irq_vectors(data);
}
-static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ const struct vdpa_dev_set_config *add_config)
{
- struct virtio_pci_modern_device *mdev;
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+ container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+ struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
- struct vp_vdpa *vp_vdpa;
+ struct vp_vdpa *vp_vdpa = NULL;
int ret, i;
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL, false);
+ dev, &vp_vdpa_ops, 1, 1, name, false);
+
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
}
- mdev = &vp_vdpa->mdev;
- mdev->pci_dev = pdev;
-
- ret = vp_modern_probe(mdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
- goto err;
- }
-
- pci_set_master(pdev);
- pci_set_drvdata(pdev, vp_vdpa);
+ vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
vp_vdpa->vdpa.dma_dev = &pdev->dev;
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+ vp_vdpa->mdev = mdev;
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
@@ -516,7 +520,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
- ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+ vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
+ ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
if (ret) {
dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
goto err;
@@ -529,12 +534,104 @@ err:
return ret;
}
+static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
+ struct vdpa_device *dev)
+{
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+ container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+ struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;
+
+ _vdpa_unregister_device(&vp_vdpa->vdpa);
+ vp_vdpa_mgtdev->vp_vdpa = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
+ .dev_add = vp_vdpa_dev_add,
+ .dev_del = vp_vdpa_dev_del,
+};
+
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
+ struct vdpa_mgmt_dev *mgtdev;
+ struct device *dev = &pdev->dev;
+ struct virtio_pci_modern_device *mdev = NULL;
+ struct virtio_device_id *mdev_id = NULL;
+ int err;
+
+ vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
+ if (!vp_vdpa_mgtdev)
+ return -ENOMEM;
+
+ mgtdev = &vp_vdpa_mgtdev->mgtdev;
+ mgtdev->ops = &vp_vdpa_mdev_ops;
+ mgtdev->device = dev;
+
+ mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
+ if (!mdev) {
+ err = -ENOMEM;
+ goto mdev_err;
+ }
+
+ mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
+ if (!mdev_id) {
+ err = -ENOMEM;
+ goto mdev_id_err;
+ }
+
+ vp_vdpa_mgtdev->mdev = mdev;
+ mdev->pci_dev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ goto probe_err;
+ }
+
+ err = vp_modern_probe(mdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+ goto probe_err;
+ }
+
+ mdev_id->device = mdev->id.device;
+ mdev_id->vendor = mdev->id.vendor;
+ mgtdev->id_table = mdev_id;
+ mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
+ mgtdev->supported_features = vp_modern_get_features(mdev);
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vp_vdpa_mgtdev);
+
+ err = vdpa_mgmtdev_register(mgtdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
+ goto register_err;
+ }
+
+ return 0;
+
+register_err:
+ vp_modern_remove(vp_vdpa_mgtdev->mdev);
+probe_err:
+ kfree(mdev_id);
+mdev_id_err:
+ kfree(mdev);
+mdev_err:
+ kfree(vp_vdpa_mgtdev);
+ return err;
+}
+
static void vp_vdpa_remove(struct pci_dev *pdev)
{
- struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
+ struct virtio_pci_modern_device *mdev = NULL;
- vp_modern_remove(&vp_vdpa->mdev);
- vdpa_unregister_device(&vp_vdpa->vdpa);
+ mdev = vp_vdpa_mgtdev->mdev;
+ vp_modern_remove(mdev);
+ vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+ kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+ kfree(mdev);
+ kfree(vp_vdpa_mgtdev);
}
static struct pci_driver vp_vdpa_driver = {
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 5829cf2d0552..ea61330a3431 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -126,6 +126,23 @@ void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
/**
+ * vhost_iotlb_init - initialize a vhost IOTLB
+ * @iotlb: the IOTLB that needs to be initialized
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ */
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags)
+{
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_init);
+
+/**
* vhost_iotlb_alloc - add a new vhost IOTLB
* @limit: maximum number of IOTLB entries
* @flags: VHOST_IOTLB_FLAG_XXX
@@ -139,11 +156,7 @@ struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
if (!iotlb)
return NULL;
- iotlb->root = RB_ROOT_CACHED;
- iotlb->limit = limit;
- iotlb->nmaps = 0;
- iotlb->flags = flags;
- INIT_LIST_HEAD(&iotlb->list);
+ vhost_iotlb_init(iotlb, limit, flags);
return iotlb;
}
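Exporting vhost_iotlb_init() lets a caller embed a struct vhost_iotlb inside its own object instead of heap-allocating one with vhost_iotlb_alloc(); that is how the per-ASID IOTLBs later in this series are set up. A minimal sketch under that assumption (the wrapper type and helper are illustrative, not from this patch):

#include <linux/vhost_iotlb.h>

struct my_address_space {
	struct vhost_iotlb iotlb;	/* embedded, no separate allocation */
	u32 id;
};

static void my_address_space_setup(struct my_address_space *as, u32 id)
{
	/* limit 0 = unlimited entries, no flags */
	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = id;
}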
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 297b5db47454..68e4ecd1cc0e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1374,16 +1374,9 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
-static void vhost_net_flush_vq(struct vhost_net *n, int index)
-{
- vhost_poll_flush(n->poll + index);
- vhost_poll_flush(&n->vqs[index].vq.poll);
-}
-
static void vhost_net_flush(struct vhost_net *n)
{
- vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
- vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+ vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
@@ -1572,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
if (oldsock) {
- vhost_net_flush_vq(n, index);
+ vhost_dev_flush(&n->dev);
sockfd_put(oldsock);
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 532e204f2b1b..ffd9e6c2ffc1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1436,7 +1436,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- vhost_work_dev_flush(&vs->dev);
+ vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_scsi_clear_endpoint(vs, &t);
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
- /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
- vhost_scsi_flush(vs);
kfree(vs->dev.vqs);
kvfree(vs);
return 0;
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 05740cba1cd8..bc8e7fb1e635 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -144,14 +144,9 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
-static void vhost_test_flush_vq(struct vhost_test *n, int index)
-{
- vhost_poll_flush(&n->vqs[index].poll);
-}
-
static void vhost_test_flush(struct vhost_test *n)
{
- vhost_test_flush_vq(n, VHOST_TEST_VQ);
+ vhost_dev_flush(&n->dev);
}
static int vhost_test_release(struct inode *inode, struct file *f)
@@ -163,9 +158,6 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
- /* We do an extra flush before freeing memory,
- * since jobs can re-queue themselves. */
- vhost_test_flush(n);
kfree(n->dev.vqs);
kfree(n);
return 0;
@@ -210,7 +202,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
goto err;
if (oldpriv) {
- vhost_test_flush_vq(n, index);
+ vhost_test_flush(n);
}
}
@@ -303,7 +295,7 @@ static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
mutex_unlock(&vq->mutex);
if (enable) {
- vhost_test_flush_vq(n, index);
+ vhost_test_flush(n);
}
mutex_unlock(&n->dev.mutex);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 4c2f0bd06285..935a1d0ddb97 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -28,17 +28,27 @@
enum {
VHOST_VDPA_BACKEND_FEATURES =
(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
- (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+#define VHOST_VDPA_IOTLB_BUCKETS 16
+
+struct vhost_vdpa_as {
+ struct hlist_node hash_link;
+ struct vhost_iotlb iotlb;
+ u32 id;
+};
+
struct vhost_vdpa {
struct vhost_dev vdev;
struct iommu_domain *domain;
struct vhost_virtqueue *vqs;
struct completion completion;
struct vdpa_device *vdpa;
+ struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
struct device dev;
struct cdev cdev;
atomic_t opened;
@@ -48,12 +58,89 @@ struct vhost_vdpa {
struct eventfd_ctx *config_ctx;
int in_batch;
struct vdpa_iova_range range;
+ u32 batch_asid;
};
static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+ struct vhost_vdpa_as *as = container_of(iotlb, struct
+ vhost_vdpa_as, iotlb);
+ return as->id;
+}
+
+static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ hlist_for_each_entry(as, head, hash_link)
+ if (as->id == asid)
+ return as;
+
+ return NULL;
+}
+
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return NULL;
+
+ return &as->iotlb;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ if (asid_to_as(v, asid))
+ return NULL;
+
+ if (asid >= v->vdpa->nas)
+ return NULL;
+
+ as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
+ return NULL;
+
+ vhost_iotlb_init(&as->iotlb, 0, 0);
+ as->id = asid;
+ hlist_add_head(&as->hash_link, head);
+
+ return as;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+ u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (as)
+ return as;
+
+ return vhost_vdpa_alloc_as(v, asid);
+}
+
+static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return -EINVAL;
+
+ hlist_del(&as->hash_link);
+ vhost_iotlb_reset(&as->iotlb);
+ kfree(as);
+
+ return 0;
+}
+
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -411,6 +498,22 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
+ case VHOST_VDPA_GET_VRING_GROUP:
+ s.index = idx;
+ s.num = ops->get_vq_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_VDPA_SET_GROUP_ASID:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ if (s.num >= vdpa->nas)
+ return -EINVAL;
+ if (!ops->set_group_asid)
+ return -EOPNOTSUPP;
+ return ops->set_group_asid(vdpa, idx, s.num);
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
@@ -505,6 +608,15 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VRING_NUM:
r = vhost_vdpa_get_vring_num(v, argp);
break;
+ case VHOST_VDPA_GET_GROUP_NUM:
+ if (copy_to_user(argp, &v->vdpa->ngroups,
+ sizeof(v->vdpa->ngroups)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_AS_NUM:
+ if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
+ r = -EFAULT;
+ break;
case VHOST_SET_LOG_BASE:
case VHOST_SET_LOG_FD:
r = -ENOIOCTLCMD;
@@ -537,10 +649,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r;
}
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map;
struct page *page;
unsigned long pfn, pinned;
@@ -559,10 +672,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
}
}
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
- struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file;
@@ -574,23 +687,16 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
}
}
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
struct vdpa_device *vdpa = v->vdpa;
if (vdpa->use_va)
- return vhost_vdpa_va_unmap(v, start, last);
-
- return vhost_vdpa_pa_unmap(v, start, last);
-}
-
-static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
-{
- struct vhost_dev *dev = &v->vdev;
+ return vhost_vdpa_va_unmap(v, iotlb, start, last);
- vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
- kfree(dev->iotlb);
- dev->iotlb = NULL;
+ return vhost_vdpa_pa_unmap(v, iotlb, start, last);
}
static int perm_to_iommu_flags(u32 perm)
@@ -615,30 +721,31 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE;
}
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
- u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
int r = 0;
- r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+ r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
- r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
+ r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
- r = ops->set_map(vdpa, dev->iotlb);
+ r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
}
if (r) {
- vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+ vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
return r;
}
@@ -648,25 +755,34 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
return 0;
}
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size)
{
- struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
- vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+ vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
if (ops->dma_map) {
- ops->dma_unmap(vdpa, iova, size);
+ ops->dma_unmap(vdpa, asid, iova, size);
} else if (ops->set_map) {
if (!v->in_batch)
- ops->set_map(vdpa, dev->iotlb);
+ ops->set_map(vdpa, asid, iotlb);
} else {
iommu_unmap(v->domain, iova, size);
}
+
+ /* If we are in the middle of batch processing, delay freeing
+ * the AS until BATCH_END.
+ */
+ if (!v->in_batch && !iotlb->nmaps)
+ vhost_vdpa_remove_as(v, asid);
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
@@ -696,7 +812,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
map_file->offset = offset;
map_file->file = get_file(vma->vm_file);
- ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+ ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
perm, map_file);
if (ret) {
fput(map_file->file);
@@ -709,7 +825,7 @@ next:
map_iova += map_size;
}
if (ret)
- vhost_vdpa_unmap(v, iova, map_iova - iova);
+ vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
mmap_read_unlock(dev->mm);
@@ -717,6 +833,7 @@ next:
}
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
@@ -780,7 +897,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
csize = PFN_PHYS(last_pfn - map_pfn + 1);
- ret = vhost_vdpa_map(v, iova, csize,
+ ret = vhost_vdpa_map(v, iotlb, iova, csize,
PFN_PHYS(map_pfn),
perm, NULL);
if (ret) {
@@ -810,7 +927,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
}
/* Pin the rest chunk */
- ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
@@ -830,7 +947,7 @@ out:
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
- vhost_vdpa_unmap(v, start, size);
+ vhost_vdpa_unmap(v, iotlb, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
@@ -841,11 +958,10 @@ free:
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
struct vhost_iotlb_msg *msg)
{
- struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
- struct vhost_iotlb *iotlb = dev->iotlb;
if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
@@ -857,19 +973,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
return -EEXIST;
if (vdpa->use_va)
- return vhost_vdpa_va_map(v, msg->iova, msg->size,
+ return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
msg->uaddr, msg->perm);
- return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+ return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
msg->perm);
}
-static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_iotlb *iotlb = NULL;
+ struct vhost_vdpa_as *as = NULL;
int r = 0;
mutex_lock(&dev->mutex);
@@ -878,20 +996,47 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
if (r)
goto unlock;
+ if (msg->type == VHOST_IOTLB_UPDATE ||
+ msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+ as = vhost_vdpa_find_alloc_as(v, asid);
+ if (!as) {
+ dev_err(&v->dev, "can't find and alloc asid %d\n",
+ asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+ iotlb = &as->iotlb;
+ } else
+ iotlb = asid_to_iotlb(v, asid);
+
+ if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+ if (v->in_batch && v->batch_asid != asid) {
+ dev_info(&v->dev, "batch id %d asid %d\n",
+ v->batch_asid, asid);
+ }
+ if (!iotlb)
+ dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
- r = vhost_vdpa_process_iotlb_update(v, msg);
+ r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
break;
case VHOST_IOTLB_INVALIDATE:
- vhost_vdpa_unmap(v, msg->iova, msg->size);
+ vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
break;
case VHOST_IOTLB_BATCH_BEGIN:
+ v->batch_asid = asid;
v->in_batch = true;
break;
case VHOST_IOTLB_BATCH_END:
if (v->in_batch && ops->set_map)
- ops->set_map(vdpa, dev->iotlb);
+ ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
+ if (!iotlb->nmaps)
+ vhost_vdpa_remove_as(v, asid);
break;
default:
r = -EINVAL;
@@ -977,6 +1122,21 @@ static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
}
}
+static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+{
+ struct vhost_vdpa_as *as;
+ u32 asid;
+
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+
+ for (asid = 0; asid < v->vdpa->nas; asid++) {
+ as = asid_to_as(v, asid);
+ if (as)
+ vhost_vdpa_remove_as(v, asid);
+ }
+}
+
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
@@ -1010,15 +1170,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
- dev->iotlb = vhost_iotlb_alloc(0, 0);
- if (!dev->iotlb) {
- r = -ENOMEM;
- goto err_init_iotlb;
- }
-
r = vhost_vdpa_alloc_domain(v);
if (r)
- goto err_init_iotlb;
+ goto err_alloc_domain;
vhost_vdpa_set_iova_range(v);
@@ -1026,9 +1180,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
return 0;
-err_init_iotlb:
- vhost_dev_cleanup(&v->vdev);
- kfree(vqs);
+err_alloc_domain:
+ vhost_vdpa_cleanup(v);
err:
atomic_dec(&v->opened);
return r;
@@ -1052,11 +1205,9 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
- vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
vhost_dev_cleanup(&v->vdev);
- kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
atomic_dec(&v->opened);
@@ -1152,7 +1303,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa *v;
int minor;
- int r;
+ int i, r;
+
+ /* We can't support platform IOMMU devices with more than one
+ * group or address space.
+ */
+ if (!ops->set_map && !ops->dma_map &&
+ (vdpa->ngroups > 1 || vdpa->nas > 1))
+ return -EOPNOTSUPP;
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!v)
@@ -1196,6 +1354,9 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
init_completion(&v->completion);
vdpa_set_drvdata(vdpa, v);
+ for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
+ INIT_HLIST_HEAD(&v->as[i]);
+
return 0;
err:
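From userspace, the new ioctls handled above are exercised roughly as follows; the VHOST_VDPA_* definitions come from the matching include/uapi/linux/vhost.h update, which is not part of this hunk, and error handling is omitted. This is a sketch of the expected calling convention (index/num usage inferred from the handler above), not a definitive API description:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int move_vq_group_to_asid(int vhost_vdpa_fd, unsigned int vq_index,
				 unsigned int asid)
{
	struct vhost_vring_state s = { .index = vq_index };
	unsigned int ngroups, nas;

	ioctl(vhost_vdpa_fd, VHOST_VDPA_GET_GROUP_NUM, &ngroups);
	ioctl(vhost_vdpa_fd, VHOST_VDPA_GET_AS_NUM, &nas);

	/* ask which group the virtqueue belongs to; the kernel fills s.num */
	ioctl(vhost_vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &s);

	/* bind that group to the requested address space */
	s.index = s.num;	/* group id */
	s.num = asid;
	return ioctl(vhost_vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}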
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d02173fb290c..40097826cff0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_work_dev_flush(struct vhost_dev *dev)
+void vhost_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
@@ -243,15 +243,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
wait_for_completion(&flush.wait_event);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
-
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
-{
- vhost_work_dev_flush(poll->dev);
-}
-EXPORT_SYMBOL_GPL(vhost_poll_flush);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
@@ -468,7 +460,7 @@ void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
bool use_worker,
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
@@ -538,7 +530,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
- vhost_work_dev_flush(dev);
+ vhost_dev_flush(dev);
return attach.ret;
}
@@ -661,11 +653,11 @@ void vhost_dev_stop(struct vhost_dev *dev)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+ if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
vhost_poll_stop(&dev->vqs[i]->poll);
- vhost_poll_flush(&dev->vqs[i]->poll);
- }
}
+
+ vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
@@ -1090,11 +1082,14 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access)
return true;
}
-static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
int ret = 0;
+ if (asid != 0)
+ return -EINVAL;
+
mutex_lock(&dev->mutex);
vhost_dev_lock_vqs(dev);
switch (msg->type) {
@@ -1141,6 +1136,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct vhost_iotlb_msg msg;
size_t offset;
int type, ret;
+ u32 asid = 0;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type)) {
@@ -1156,7 +1152,16 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
break;
case VHOST_IOTLB_MSG_V2:
- offset = sizeof(__u32);
+ if (vhost_backend_has_feature(dev->vqs[0],
+ VHOST_BACKEND_F_IOTLB_ASID)) {
+ ret = copy_from_iter(&asid, sizeof(asid), from);
+ if (ret != sizeof(asid)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ offset = 0;
+ } else
+ offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
@@ -1178,9 +1183,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
}
if (dev->msg_handler)
- ret = dev->msg_handler(dev, &msg);
+ ret = dev->msg_handler(dev, asid, &msg);
else
- ret = vhost_process_iotlb_msg(dev, &msg);
+ ret = vhost_process_iotlb_msg(dev, asid, &msg);
if (ret) {
ret = -EFAULT;
goto done;
@@ -1719,7 +1724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
- vhost_poll_flush(&vq->poll);
+ vhost_dev_flush(vq->poll.dev);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
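For reference, once VHOST_BACKEND_F_IOTLB_ASID is negotiated the v2 IOTLB message stream parsed in vhost_chr_write_iter() above carries the address-space id in the 32-bit word that used to be reserved. A sketch of the layout (field names illustrative; the authoritative definition is struct vhost_msg_v2 in the uapi headers):

struct vhost_msg_v2_sketch {
	__u32 type;			/* VHOST_IOTLB_MSG_V2 */
	__u32 asid;			/* consumed only when
					 * VHOST_BACKEND_F_IOTLB_ASID was
					 * negotiated; otherwise skipped as the
					 * old reserved word (see the offset
					 * handling above) */
	struct vhost_iotlb_msg iotlb;	/* iova/size/uaddr/perm/type */
};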
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 638bb640d6b4..d9109107af08 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -44,9 +44,8 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
__poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
-void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_dev_flush(struct vhost_dev *dev);
+void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
@@ -161,7 +160,7 @@ struct vhost_dev {
int byte_weight;
u64 kcov_handle;
bool use_worker;
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg);
};
@@ -169,7 +168,7 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
int nvqs, int iov_limit, int weight, int byte_weight,
bool use_worker,
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6c9d41db1de..368330417bde 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -705,12 +705,7 @@ out:
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
- if (vsock->vqs[i].handle_kick)
- vhost_poll_flush(&vsock->vqs[i].poll);
- vhost_work_dev_flush(&vsock->dev);
+ vhost_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 40496e9e9b43..f304163e87e9 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/crc32.h>
+#include <linux/fb.h>
#include <asm/io.h>
@@ -392,7 +393,9 @@ static int __init sticonsole_init(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
font_data[i] = STI_DEF_FONT;
- pr_info("sticon: Initializing STI text console.\n");
+ pr_info("sticon: Initializing STI text console on %s at [%s]\n",
+ sticon_sti->sti_data->inq_outptr.dev_name,
+ sticon_sti->pa_path);
console_lock();
err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
PAGE0->mem_cons.cl_class != CL_DUPLEX);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 837011be9978..fa23bf0247b0 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -30,10 +30,11 @@
#include <asm/pdc.h>
#include <asm/cacheflush.h>
#include <asm/grfioctl.h>
+#include <asm/fb.h>
#include "../fbdev/sticore.h"
-#define STI_DRIVERVERSION "Version 0.9b"
+#define STI_DRIVERVERSION "Version 0.9c"
static struct sti_struct *default_sti __read_mostly;
@@ -502,7 +503,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (!fbfont)
return NULL;
- pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
+ pr_info(" using %ux%u framebuffer font %s\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
@@ -967,6 +968,7 @@ out_err:
static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
{
+ pr_info(" located at [%s]\n", sti->pa_path);
if (strcmp (path, default_sti_path) == 0)
default_sti = sti;
}
@@ -978,7 +980,6 @@ static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
*/
static int __init sticore_pa_init(struct parisc_device *dev)
{
- char pa_path[21];
struct sti_struct *sti = NULL;
int hpa = dev->hpa.start;
@@ -991,8 +992,8 @@ static int __init sticore_pa_init(struct parisc_device *dev)
if (!sti)
return 1;
- print_pa_hwpath(dev, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pa_hwpath(dev, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
return 0;
}
@@ -1028,9 +1029,8 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
sti = sti_try_rom_generic(rom_base, fb_base, pd);
if (sti) {
- char pa_path[30];
- print_pci_hwpath(pd, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pci_hwpath(pd, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
}
if (!sti) {
@@ -1148,6 +1148,22 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
return ret;
}
+/* check if given fb_info is the primary device */
+int fb_is_primary_device(struct fb_info *info)
+{
+ struct sti_struct *sti;
+
+ sti = sti_get_rom(0);
+
+ /* if no built-in graphics card found, allow any fb driver as default */
+ if (!sti)
+ return true;
+
+ /* return true if it's the default built-in framebuffer driver */
+ return (sti->info == info);
+}
+EXPORT_SYMBOL(fb_is_primary_device);
+
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/pxa3xx-regs.h b/drivers/video/fbdev/pxa3xx-regs.h
new file mode 100644
index 000000000000..6a96610ef9b5
--- /dev/null
+++ b/drivers/video/fbdev/pxa3xx-regs.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARCH_REGS_LCD_H
+#define __ASM_ARCH_REGS_LCD_H
+
+/*
+ * LCD Controller Registers and Bits Definitions
+ */
+#define LCCR0 (0x000) /* LCD Controller Control Register 0 */
+#define LCCR1 (0x004) /* LCD Controller Control Register 1 */
+#define LCCR2 (0x008) /* LCD Controller Control Register 2 */
+#define LCCR3 (0x00C) /* LCD Controller Control Register 3 */
+#define LCCR4 (0x010) /* LCD Controller Control Register 4 */
+#define LCCR5 (0x014) /* LCD Controller Control Register 5 */
+#define LCSR (0x038) /* LCD Controller Status Register 0 */
+#define LCSR1 (0x034) /* LCD Controller Status Register 1 */
+#define LIIDR (0x03C) /* LCD Controller Interrupt ID Register */
+#define TMEDRGBR (0x040) /* TMED RGB Seed Register */
+#define TMEDCR (0x044) /* TMED Control Register */
+
+#define FBR0 (0x020) /* DMA Channel 0 Frame Branch Register */
+#define FBR1 (0x024) /* DMA Channel 1 Frame Branch Register */
+#define FBR2 (0x028) /* DMA Channel 2 Frame Branch Register */
+#define FBR3 (0x02C) /* DMA Channel 3 Frame Branch Register */
+#define FBR4 (0x030) /* DMA Channel 4 Frame Branch Register */
+#define FBR5 (0x110) /* DMA Channel 5 Frame Branch Register */
+#define FBR6 (0x114) /* DMA Channel 6 Frame Branch Register */
+
+#define OVL1C1 (0x050) /* Overlay 1 Control Register 1 */
+#define OVL1C2 (0x060) /* Overlay 1 Control Register 2 */
+#define OVL2C1 (0x070) /* Overlay 2 Control Register 1 */
+#define OVL2C2 (0x080) /* Overlay 2 Control Register 2 */
+
+#define CMDCR (0x100) /* Command Control Register */
+#define PRSR (0x104) /* Panel Read Status Register */
+
+#define LCCR3_BPP(x) ((((x) & 0x7) << 24) | (((x) & 0x8) ? (1 << 29) : 0))
+
+#define LCCR3_PDFOR_0 (0 << 30)
+#define LCCR3_PDFOR_1 (1 << 30)
+#define LCCR3_PDFOR_2 (2 << 30)
+#define LCCR3_PDFOR_3 (3 << 30)
+
+#define LCCR4_PAL_FOR_0 (0 << 15)
+#define LCCR4_PAL_FOR_1 (1 << 15)
+#define LCCR4_PAL_FOR_2 (2 << 15)
+#define LCCR4_PAL_FOR_3 (3 << 15)
+#define LCCR4_PAL_FOR_MASK (3 << 15)
+
+#define FDADR0 (0x200) /* DMA Channel 0 Frame Descriptor Address Register */
+#define FDADR1 (0x210) /* DMA Channel 1 Frame Descriptor Address Register */
+#define FDADR2 (0x220) /* DMA Channel 2 Frame Descriptor Address Register */
+#define FDADR3 (0x230) /* DMA Channel 3 Frame Descriptor Address Register */
+#define FDADR4 (0x240) /* DMA Channel 4 Frame Descriptor Address Register */
+#define FDADR5 (0x250) /* DMA Channel 5 Frame Descriptor Address Register */
+#define FDADR6 (0x260) /* DMA Channel 6 Frame Descriptor Address Register */
+
+#define LCCR0_ENB (1 << 0) /* LCD Controller enable */
+#define LCCR0_CMS (1 << 1) /* Color/Monochrome Display Select */
+#define LCCR0_Color (LCCR0_CMS*0) /* Color display */
+#define LCCR0_Mono (LCCR0_CMS*1) /* Monochrome display */
+#define LCCR0_SDS (1 << 2) /* Single/Dual Panel Display Select */
+#define LCCR0_Sngl (LCCR0_SDS*0) /* Single panel display */
+#define LCCR0_Dual (LCCR0_SDS*1) /* Dual panel display */
+
+#define LCCR0_LDM (1 << 3) /* LCD Disable Done Mask */
+#define LCCR0_SFM (1 << 4) /* Start of frame mask */
+#define LCCR0_IUM (1 << 5) /* Input FIFO underrun mask */
+#define LCCR0_EFM (1 << 6) /* End of Frame mask */
+#define LCCR0_PAS (1 << 7) /* Passive/Active display Select */
+#define LCCR0_Pas (LCCR0_PAS*0) /* Passive display (STN) */
+#define LCCR0_Act (LCCR0_PAS*1) /* Active display (TFT) */
+#define LCCR0_DPD (1 << 9) /* Double Pixel Data (monochrome) */
+#define LCCR0_4PixMono (LCCR0_DPD*0) /* 4-Pixel/clock Monochrome display */
+#define LCCR0_8PixMono (LCCR0_DPD*1) /* 8-Pixel/clock Monochrome display */
+#define LCCR0_DIS (1 << 10) /* LCD Disable */
+#define LCCR0_QDM (1 << 11) /* LCD Quick Disable mask */
+#define LCCR0_PDD (0xff << 12) /* Palette DMA request delay */
+#define LCCR0_PDD_S 12
+#define LCCR0_BM (1 << 20) /* Branch mask */
+#define LCCR0_OUM (1 << 21) /* Output FIFO underrun mask */
+#define LCCR0_LCDT (1 << 22) /* LCD panel type */
+#define LCCR0_RDSTM (1 << 23) /* Read status interrupt mask */
+#define LCCR0_CMDIM (1 << 24) /* Command interrupt mask */
+#define LCCR0_OUC (1 << 25) /* Overlay Underlay control bit */
+#define LCCR0_LDDALT (1 << 26) /* LDD alternate mapping control */
+
+#define Fld(Size, Shft) (((Size) << 16) + (Shft))
+#define FShft(Field) ((Field) & 0x0000FFFF)
+
+#define LCCR1_PPL Fld (10, 0) /* Pixels Per Line - 1 */
+#define LCCR1_DisWdth(Pixel) (((Pixel) - 1) << FShft (LCCR1_PPL))
+
+#define LCCR1_HSW Fld (6, 10) /* Horizontal Synchronization */
+#define LCCR1_HorSnchWdth(Tpix) (((Tpix) - 1) << FShft (LCCR1_HSW))
+
+#define LCCR1_ELW Fld (8, 16) /* End-of-Line pixel clock Wait - 1 */
+#define LCCR1_EndLnDel(Tpix) (((Tpix) - 1) << FShft (LCCR1_ELW))
+
+#define LCCR1_BLW Fld (8, 24) /* Beginning-of-Line pixel clock */
+#define LCCR1_BegLnDel(Tpix) (((Tpix) - 1) << FShft (LCCR1_BLW))
+
+#define LCCR2_LPP Fld (10, 0) /* Line Per Panel - 1 */
+#define LCCR2_DisHght(Line) (((Line) - 1) << FShft (LCCR2_LPP))
+
+#define LCCR2_VSW Fld (6, 10) /* Vertical Synchronization pulse - 1 */
+#define LCCR2_VrtSnchWdth(Tln) (((Tln) - 1) << FShft (LCCR2_VSW))
+
+#define LCCR2_EFW Fld (8, 16) /* End-of-Frame line clock Wait */
+#define LCCR2_EndFrmDel(Tln) ((Tln) << FShft (LCCR2_EFW))
+
+#define LCCR2_BFW Fld (8, 24) /* Beginning-of-Frame line clock */
+#define LCCR2_BegFrmDel(Tln) ((Tln) << FShft (LCCR2_BFW))
+
+#define LCCR3_API (0xf << 16) /* AC Bias pin transitions per interrupt */
+#define LCCR3_API_S 16
+#define LCCR3_VSP (1 << 20) /* vertical sync polarity */
+#define LCCR3_HSP (1 << 21) /* horizontal sync polarity */
+#define LCCR3_PCP (1 << 22) /* Pixel Clock Polarity (L_PCLK) */
+#define LCCR3_PixRsEdg (LCCR3_PCP*0) /* Pixel clock Rising-Edge */
+#define LCCR3_PixFlEdg (LCCR3_PCP*1) /* Pixel clock Falling-Edge */
+
+#define LCCR3_OEP (1 << 23) /* Output Enable Polarity */
+#define LCCR3_OutEnH (LCCR3_OEP*0) /* Output Enable active High */
+#define LCCR3_OutEnL (LCCR3_OEP*1) /* Output Enable active Low */
+
+#define LCCR3_DPC (1 << 27) /* double pixel clock mode */
+#define LCCR3_PCD Fld (8, 0) /* Pixel Clock Divisor */
+#define LCCR3_PixClkDiv(Div) (((Div) << FShft (LCCR3_PCD)))
+
+#define LCCR3_ACB Fld (8, 8) /* AC Bias */
+#define LCCR3_Acb(Acb) (((Acb) << FShft (LCCR3_ACB)))
+
+#define LCCR3_HorSnchH (LCCR3_HSP*0) /* HSP Active High */
+#define LCCR3_HorSnchL (LCCR3_HSP*1) /* HSP Active Low */
+
+#define LCCR3_VrtSnchH (LCCR3_VSP*0) /* VSP Active High */
+#define LCCR3_VrtSnchL (LCCR3_VSP*1) /* VSP Active Low */
+
+#define LCCR5_IUM(x) (1 << ((x) + 23)) /* input underrun mask */
+#define LCCR5_BSM(x) (1 << ((x) + 15)) /* branch mask */
+#define LCCR5_EOFM(x) (1 << ((x) + 7)) /* end of frame mask */
+#define LCCR5_SOFM(x) (1 << ((x) + 0)) /* start of frame mask */
+
+#define LCSR_LDD (1 << 0) /* LCD Disable Done */
+#define LCSR_SOF (1 << 1) /* Start of frame */
+#define LCSR_BER (1 << 2) /* Bus error */
+#define LCSR_ABC (1 << 3) /* AC Bias count */
+#define LCSR_IUL (1 << 4) /* input FIFO underrun Lower panel */
+#define LCSR_IUU (1 << 5) /* input FIFO underrun Upper panel */
+#define LCSR_OU (1 << 6) /* output FIFO underrun */
+#define LCSR_QD (1 << 7) /* quick disable */
+#define LCSR_EOF (1 << 8) /* end of frame */
+#define LCSR_BS (1 << 9) /* branch status */
+#define LCSR_SINT (1 << 10) /* subsequent interrupt */
+#define LCSR_RD_ST (1 << 11) /* read status */
+#define LCSR_CMD_INT (1 << 12) /* command interrupt */
+
+#define LCSR1_IU(x) (1 << ((x) + 23)) /* Input FIFO underrun */
+#define LCSR1_BS(x) (1 << ((x) + 15)) /* Branch Status */
+#define LCSR1_EOF(x) (1 << ((x) + 7)) /* End of Frame Status */
+#define LCSR1_SOF(x) (1 << ((x) - 1)) /* Start of Frame Status */
+
+#define LDCMD_PAL (1 << 26) /* instructs DMA to load palette buffer */
+
+/* overlay control registers */
+#define OVLxC1_PPL(x) ((((x) - 1) & 0x3ff) << 0) /* Pixels Per Line */
+#define OVLxC1_LPO(x) ((((x) - 1) & 0x3ff) << 10) /* Number of Lines */
+#define OVLxC1_BPP(x) (((x) & 0xf) << 20) /* Bits Per Pixel */
+#define OVLxC1_OEN (1 << 31) /* Enable bit for Overlay */
+#define OVLxC2_XPOS(x) (((x) & 0x3ff) << 0) /* Horizontal Position */
+#define OVLxC2_YPOS(x) (((x) & 0x3ff) << 10) /* Vertical Position */
+#define OVL2C2_PFOR(x) (((x) & 0x7) << 20) /* Pixel Format */
+
+/* smartpanel related */
+#define PRSR_DATA(x) ((x) & 0xff) /* Panel Data */
+#define PRSR_A0 (1 << 8) /* Read Data Source */
+#define PRSR_ST_OK (1 << 9) /* Status OK */
+#define PRSR_CON_NT (1 << 10) /* Continue to Next Command */
+
+#endif /* __ASM_ARCH_REGS_LCD_H */
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 8ad91c251fe6..66cfc3e9d3cf 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -57,14 +57,13 @@
#include <linux/console.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <mach/bitfield.h>
#include <linux/platform_data/video-pxafb.h>
/*
@@ -73,6 +72,7 @@
#define DEBUG_VAR 1
#include "pxafb.h"
+#include "pxa3xx-regs.h"
/* Bits which should not be set in machine configuration structures */
#define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index c338f7848ae2..0ebdd28a0b81 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -370,6 +370,9 @@ struct sti_struct {
/* pointer to all internal data */
struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
};
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index bebb2eea6448..38a861e22c33 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1358,11 +1358,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
goto out_err3;
}
+ /* save for primary gfx device detection & unregister_framebuffer() */
+ sti->info = info;
if (register_framebuffer(&fb->info) < 0)
goto out_err4;
- sti->info = info; /* save for unregister_framebuffer() */
-
fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index e49bec8bc8a4..07035249a5e1 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -659,7 +659,6 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
{
struct doorbell_queue *dbq;
unsigned long flags;
- int ret = 0;
dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
if (!dbq) {
@@ -676,7 +675,7 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
filp->private_data = dbq;
- return ret;
+ return 0;
}
/*
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 22f15f444f75..ef04a96942bf 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
/* Do some validation, then set FEATURES_OK */
static int virtio_features_ok(struct virtio_device *dev)
{
- unsigned status;
+ unsigned int status;
int ret;
might_sleep();
@@ -220,6 +220,15 @@ static int virtio_features_ok(struct virtio_device *dev)
* */
void virtio_reset_device(struct virtio_device *dev)
{
+ /*
+ * The virtio_synchronize_cbs() below guarantees that any
+ * interrupt for this line arriving after
+ * virtio_synchronize_cbs() has completed will see
+ * vq->broken as true.
+ */
+ virtio_break_device(dev);
+ virtio_synchronize_cbs(dev);
+
dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);
@@ -413,7 +422,7 @@ int register_virtio_device(struct virtio_device *dev)
device_initialize(&dev->dev);
/* Assign a unique device index and hence name. */
- err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&virtio_index_ida, GFP_KERNEL);
if (err < 0)
goto out;
@@ -428,16 +437,16 @@ int register_virtio_device(struct virtio_device *dev)
dev->config_enabled = false;
dev->config_change_pending = false;
+ INIT_LIST_HEAD(&dev->vqs);
+ spin_lock_init(&dev->vqs_list_lock);
+
/* We always start by resetting the device, in case a previous
* driver messed it up. This also tests that code path a little. */
- dev->config->reset(dev);
+ virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
- INIT_LIST_HEAD(&dev->vqs);
- spin_lock_init(&dev->vqs_list_lock);
-
/*
* device_add() causes the bus infrastructure to look for a matching
* driver.
@@ -451,7 +460,7 @@ int register_virtio_device(struct virtio_device *dev)
out_of_node_put:
of_node_put(dev->dev.of_node);
out_ida_remove:
- ida_simple_remove(&virtio_index_ida, dev->index);
+ ida_free(&virtio_index_ida, dev->index);
out:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -469,7 +478,7 @@ void unregister_virtio_device(struct virtio_device *dev)
int index = dev->index; /* save for after device release */
device_unregister(&dev->dev);
- ida_simple_remove(&virtio_index_ida, index);
+ ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
@@ -496,7 +505,7 @@ int virtio_device_restore(struct virtio_device *dev)
/* We always start by resetting the device, in case a previous
* driver messed it up. */
- dev->config->reset(dev);
+ virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
@@ -526,8 +535,9 @@ int virtio_device_restore(struct virtio_device *dev)
goto err;
}
- /* Finally, tell the device we're all set */
- virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ /* If restore didn't do it, mark device DRIVER_OK ourselves. */
+ if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+ virtio_device_ready(dev);
virtio_config_enable(dev);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index f4c34a2a6b8e..b9737da6c4dd 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -27,7 +27,7 @@
* multiple balloon pages. All memory counters in this driver are in balloon
* page units.
*/
-#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
@@ -208,10 +208,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
page_to_balloon_pfn(page) + i);
}
-static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
{
- unsigned num_allocated_pages;
- unsigned num_pfns;
+ unsigned int num_allocated_pages;
+ unsigned int num_pfns;
struct page *page;
LIST_HEAD(pages);
@@ -272,9 +272,9 @@ static void release_pages_balloon(struct virtio_balloon *vb,
}
}
-static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
{
- unsigned num_freed_pages;
+ unsigned int num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
LIST_HEAD(pages);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 56128b9c46eb..f9a36bc7ac27 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -144,8 +144,8 @@ static int vm_finalize_features(struct virtio_device *vdev)
return 0;
}
-static void vm_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vm_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -186,8 +186,8 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
}
}
-static void vm_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vm_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -253,6 +253,11 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
/* We should never be setting status to 0. */
BUG_ON(status == 0);
+ /*
+ * Per memory-barriers.txt, wmb() is not needed to guarantee
+ * that the cache coherent memory writes have completed
+ * before writing to the MMIO region.
+ */
writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}
@@ -345,7 +350,14 @@ static void vm_del_vqs(struct virtio_device *vdev)
free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
-static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
+static void vm_synchronize_cbs(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
+}
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
@@ -455,7 +467,7 @@ error_available:
return ERR_PTR(err);
}
-static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
@@ -541,6 +553,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.finalize_features = vm_finalize_features,
.bus_name = vm_bus_name,
.get_shm_region = vm_get_shm_region,
+ .synchronize_cbs = vm_synchronize_cbs,
};
@@ -657,7 +670,7 @@ static int vm_cmdline_set(const char *device,
int err;
struct resource resources[2] = {};
char *str;
- long long int base, size;
+ long long base, size;
unsigned int irq;
int processed, consumed = 0;
struct platform_device *pdev;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d724f676608b..ca51fcc9daab 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -104,8 +104,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
- unsigned flags = PCI_IRQ_MSIX;
- unsigned i, v;
+ unsigned int flags = PCI_IRQ_MSIX;
+ unsigned int i, v;
int err = -ENOMEM;
vp_dev->msix_vectors = nvectors;
@@ -171,7 +171,7 @@ error:
return err;
}
-static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {
@@ -276,7 +275,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
-static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], bool per_vq_vectors,
const bool *ctx,
@@ -350,7 +349,7 @@ error_find:
return err;
}
-static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx)
{
@@ -389,7 +388,7 @@ out_del_vqs:
}
/* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
struct irq_affinity *desc)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index eb17a29fc7ef..23112d84218f 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -38,7 +38,7 @@ struct virtio_pci_vq_info {
struct list_head node;
/* MSI-X vector (or none) */
- unsigned msix_vector;
+ unsigned int msix_vector;
};
/* Our device structure */
@@ -68,16 +68,16 @@ struct virtio_pci_device {
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
/* Number of available vectors */
- unsigned msix_vectors;
+ unsigned int msix_vectors;
/* Vectors allocated, excluding per-vq vectors if any */
- unsigned msix_used_vectors;
+ unsigned int msix_used_vectors;
/* Whether we have vector per vq */
bool per_vq_vectors;
struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned idx,
+ unsigned int idx,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -108,7 +108,7 @@ bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
struct irq_affinity *desc);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6f4e34ce96b8..a5e5721145c7 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -45,8 +45,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
}
/* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -61,8 +61,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
/* the config->set() implementation. it's symmetric to the config->get()
* implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -109,7 +109,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned index,
+ unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -192,6 +192,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.reset = vp_reset,
.find_vqs = vp_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index a2671a20ef77..623906b4996c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -60,8 +60,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
}
/* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -98,8 +98,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
/* the config->set() implementation. it's symmetric to the config->get()
* implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -183,7 +183,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned index,
+ unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -248,7 +248,7 @@ err_map_notify:
return ERR_PTR(err);
}
-static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
@@ -394,6 +394,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.reset = vp_reset,
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
@@ -411,6 +412,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.reset = vp_reset,
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 591738ad3d56..a0fa14f28a7f 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -347,6 +347,7 @@ err_map_notify:
err_map_isr:
pci_iounmap(pci_dev, mdev->common);
err_map_common:
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
@@ -466,6 +467,11 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+ /*
+ * Per memory-barriers.txt, wmb() is not needed to guarantee
+ * that the cache coherent memory writes have completed
+ * before writing to the MMIO region.
+ */
vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
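
The comment added above leans on the ordering semantics of the non-relaxed MMIO accessors; a hedged illustration of the pattern it permits (hypothetical helper, not part of the patch):

static void example_publish_then_status(struct virtio_pci_modern_device *mdev,
					u8 *coherent_buf, u8 status)
{
	/* Plain stores to cache coherent (e.g. DMA coherent) memory... */
	coherent_buf[0] = 1;

	/*
	 * ...are ordered before the MMIO write by vp_iowrite8() itself,
	 * which maps to a non-relaxed iowrite8(), so no explicit wmb()
	 * is needed in between.
	 */
	vp_iowrite8(status, &mdev->common->device_status);
}
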
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cfb028ca238e..13a7348cedff 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -205,11 +205,9 @@ struct vring_virtqueue {
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
-static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
+static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
unsigned int total_sg)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
/*
* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold
@@ -499,7 +497,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
head = vq->free_head;
- if (virtqueue_use_indirect(_vq, total_sg))
+ if (virtqueue_use_indirect(vq, total_sg))
desc = alloc_indirect_split(_vq, total_sg, gfp);
else {
desc = NULL;
@@ -519,7 +517,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
descs_used = total_sg;
}
- if (vq->vq.num_free < descs_used) {
+ if (unlikely(vq->vq.num_free < descs_used)) {
pr_debug("Can't add buf len %i - avail = %i\n",
descs_used, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
@@ -811,7 +809,7 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
}
}
-static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used_idx;
@@ -836,7 +834,7 @@ static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
return last_used_idx;
}
-static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
+static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -1178,7 +1176,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
BUG_ON(total_sg == 0);
- if (virtqueue_use_indirect(_vq, total_sg)) {
+ if (virtqueue_use_indirect(vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, gfp);
if (err != -ENOMEM) {
@@ -1488,7 +1486,7 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
}
}
-static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -1690,7 +1688,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
- vq->broken = false;
+ vq->broken = true;
vq->last_used_idx = 0;
vq->event_triggered = false;
vq->num_added = 0;
@@ -2027,7 +2025,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
*
* This does not need to be serialized.
*/
-bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2074,7 +2072,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
*/
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
- unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+ unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
return !virtqueue_poll(_vq, last_used_idx);
}
@@ -2136,8 +2134,11 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
return IRQ_NONE;
}
- if (unlikely(vq->broken))
- return IRQ_HANDLED;
+ if (unlikely(vq->broken)) {
+ dev_warn_once(&vq->vq.vdev->dev,
+ "virtio vring IRQ raised before DRIVER_OK");
+ return IRQ_NONE;
+ }
/* Just a hint for performance: so it's ok that this can be racy! */
if (vq->event)
@@ -2179,7 +2180,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
- vq->broken = false;
+ vq->broken = true;
vq->last_used_idx = 0;
vq->event_triggered = false;
vq->num_added = 0;
@@ -2397,6 +2398,28 @@ void virtio_break_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_break_device);
+/*
+ * This should allow the device to be used by the driver. You may
+ * need to grab appropriate locks to flush the write to
+ * vq->broken. This should only be used in specific cases, e.g.
+ * probing and restoring. This function should only be called by the
+ * core, not directly by the driver.
+ */
+void __virtio_unbreak_device(struct virtio_device *dev)
+{
+ struct virtqueue *_vq;
+
+ spin_lock(&dev->vqs_list_lock);
+ list_for_each_entry(_vq, &dev->vqs, list) {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+ }
+ spin_unlock(&dev->vqs_list_lock);
+}
+EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
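
A sketch of how the core side is expected to pair this with the vq->broken = true changes above when moving a device to DRIVER_OK (based on the existing virtio_device_ready()/config->set_status() helpers; not the verbatim core hunk, which lives outside this file):

static inline void virtio_device_ready(struct virtio_device *dev)
{
	unsigned int status = dev->config->get_status(dev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

	/*
	 * Unbreak the virtqueues first so that an interrupt racing with
	 * the status write is serviced rather than dropped as "broken"
	 * by vring_interrupt().
	 */
	__virtio_unbreak_device(dev);

	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
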
+
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 76504559bc25..c40f7deb6b5a 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -53,16 +53,16 @@ static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
return to_virtio_vdpa_device(vdev)->vdpa;
}
-static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
vdpa_get_config(vdpa, offset, buf, len);
}
-static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -184,7 +184,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
}
/* Setup virtqueue callback */
- cb.callback = virtio_vdpa_virtqueue_cb;
+ cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
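
With the change above, a NULL entry in a driver's callbacks[] array is now propagated to the vDPA parent as a NULL virtqueue callback rather than a dead wrapper, so the parent can avoid injecting interrupts for queues the driver polls. A hedged example of a driver asking for such a mix (illustrative names; virtio_find_vqs() is the standard helper):

static void example_rx_done(struct virtqueue *vq)
{
	/* Consume used buffers for the interrupt-driven queue. */
}

static int example_setup_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	vq_callback_t *callbacks[] = { example_rx_done, NULL };	/* NULL => polled */
	const char *names[] = { "example-rx", "example-tx-polled" };

	return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}
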
@@ -263,7 +263,7 @@ static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
virtio_vdpa_del_vq(vq);
}
-static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
diff --git a/drivers/visorbus/Kconfig b/drivers/visorbus/Kconfig
deleted file mode 100644
index fa947a79b5cd..000000000000
--- a/drivers/visorbus/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Unisys visorbus configuration
-#
-
-config UNISYS_VISORBUS
- tristate "Unisys visorbus driver"
- depends on X86_64 && ACPI
- help
- The visorbus driver is a virtualized bus for the Unisys s-Par firmware.
- Virtualized devices allow Linux guests on a system to share disks and
- network cards that do not have SR-IOV support, and to be accessed using
- the partition desktop application. The visorbus driver is required to
- discover devices on an s-Par guest, and must be present for any other
- s-Par guest driver to function correctly.
diff --git a/drivers/visorbus/Makefile b/drivers/visorbus/Makefile
deleted file mode 100644
index e8df59d1301f..000000000000
--- a/drivers/visorbus/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorbus
-#
-
-obj-$(CONFIG_UNISYS_VISORBUS) += visorbus.o
-
-visorbus-y := visorbus_main.o
-visorbus-y += visorchannel.o
-visorbus-y += visorchipset.o
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
deleted file mode 100644
index c87213554427..000000000000
--- a/drivers/visorbus/controlvmchannel.h
+++ /dev/null
@@ -1,650 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __CONTROLVMCHANNEL_H__
-#define __CONTROLVMCHANNEL_H__
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define VISOR_CONTROLVM_CHANNEL_GUID \
- GUID_INIT(0x2b3c2d10, 0x7ef5, 0x4ad8, \
- 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-
-#define CONTROLVM_MESSAGE_MAX 64
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
-
-/* Defines for various channel queues */
-#define CONTROLVM_QUEUE_REQUEST 0
-#define CONTROLVM_QUEUE_RESPONSE 1
-#define CONTROLVM_QUEUE_EVENT 2
-#define CONTROLVM_QUEUE_ACK 3
-
-/* Max num of messages stored during IOVM creation to be reused after crash */
-#define CONTROLVM_CRASHMSG_MAX 2
-
-/*
- * struct visor_segment_state
- * @enabled: May enter other states.
- * @active: Assigned to active partition.
- * @alive: Configure message sent to service/server.
- * @revoked: Similar to partition state ShuttingDown.
- * @allocated: Memory (device/port number) has been selected by Command.
- * @known: Has been introduced to the service/guest partition.
- * @ready: Service/Guest partition has responded to introduction.
- * @operating: Resource is configured and operating.
- * @reserved: Natural alignment.
- *
- * Note: Don't use high bit unless we need to switch to ushort which is
- * non-compliant.
- */
-struct visor_segment_state {
- u16 enabled:1;
- u16 active:1;
- u16 alive:1;
- u16 revoked:1;
- u16 allocated:1;
- u16 known:1;
- u16 ready:1;
- u16 operating:1;
- u16 reserved:8;
-} __packed;
-
-static const struct visor_segment_state segment_state_running = {
- 1, 1, 1, 0, 1, 1, 1, 1
-};
-
-static const struct visor_segment_state segment_state_paused = {
- 1, 1, 1, 0, 1, 1, 1, 0
-};
-
-static const struct visor_segment_state segment_state_standby = {
- 1, 1, 0, 0, 1, 1, 1, 0
-};
-
-/*
- * enum controlvm_id
- * @CONTROLVM_INVALID:
- * @CONTROLVM_BUS_CREATE: CP --> SP, GP.
- * @CONTROLVM_BUS_DESTROY: CP --> SP, GP.
- * @CONTROLVM_BUS_CONFIGURE: CP --> SP.
- * @CONTROLVM_BUS_CHANGESTATE: CP --> SP, GP.
- * @CONTROLVM_BUS_CHANGESTATE_EVENT: SP, GP --> CP.
- * @CONTROLVM_DEVICE_CREATE: CP --> SP, GP.
- * @CONTROLVM_DEVICE_DESTROY: CP --> SP, GP.
- * @CONTROLVM_DEVICE_CONFIGURE: CP --> SP.
- * @CONTROLVM_DEVICE_CHANGESTATE: CP --> SP, GP.
- * @CONTROLVM_DEVICE_CHANGESTATE_EVENT: SP, GP --> CP.
- * @CONTROLVM_DEVICE_RECONFIGURE: CP --> Boot.
- * @CONTROLVM_CHIPSET_INIT: CP --> SP, GP.
- * @CONTROLVM_CHIPSET_STOP: CP --> SP, GP.
- * @CONTROLVM_CHIPSET_READY: CP --> SP.
- * @CONTROLVM_CHIPSET_SELFTEST: CP --> SP.
- *
- * Ids for commands that may appear in either queue of a ControlVm channel.
- *
- * Commands that are initiated by the command partition (CP), by an IO or
- * console service partition (SP), or by a guest partition (GP) are:
- * - issued on the RequestQueue queue (q #0) in the ControlVm channel
- * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
- *
- * Events that are initiated by an IO or console service partition (SP) or
- * by a guest partition (GP) are:
- * - issued on the EventQueue queue (q #2) in the ControlVm channel
- * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
- */
-enum controlvm_id {
- CONTROLVM_INVALID = 0,
- /*
- * SWITCH commands required Parameter: SwitchNumber.
- * BUS commands required Parameter: BusNumber
- */
- CONTROLVM_BUS_CREATE = 0x101,
- CONTROLVM_BUS_DESTROY = 0x102,
- CONTROLVM_BUS_CONFIGURE = 0x104,
- CONTROLVM_BUS_CHANGESTATE = 0x105,
- CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106,
- /* DEVICE commands required Parameter: BusNumber, DeviceNumber */
- CONTROLVM_DEVICE_CREATE = 0x201,
- CONTROLVM_DEVICE_DESTROY = 0x202,
- CONTROLVM_DEVICE_CONFIGURE = 0x203,
- CONTROLVM_DEVICE_CHANGESTATE = 0x204,
- CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205,
- CONTROLVM_DEVICE_RECONFIGURE = 0x206,
- /* CHIPSET commands */
- CONTROLVM_CHIPSET_INIT = 0x301,
- CONTROLVM_CHIPSET_STOP = 0x302,
- CONTROLVM_CHIPSET_READY = 0x304,
- CONTROLVM_CHIPSET_SELFTEST = 0x305,
-};
-
-/*
- * struct irq_info
- * @reserved1: Natural alignment purposes
- * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the
- * corresponding interrupt pin from Monitor; and the interrupt
- * pin is used to connect to the corresponding interrupt.
- * Used by IOPart-GP only.
- * @recv_irq_vector: Specifies interrupt vector. It, interrupt pin, and shared
- * are used to connect to the corresponding interrupt.
- * Used by IOPart-GP only.
- * @recv_irq_shared: Specifies if the recvInterrupt is shared. It, interrupt
- * pin and vector are used to connect to 0 = not shared;
- * 1 = shared the corresponding interrupt.
- * Used by IOPart-GP only.
- * @reserved: Natural alignment purposes
- */
-struct irq_info {
- u64 reserved1;
- u64 recv_irq_handle;
- u32 recv_irq_vector;
- u8 recv_irq_shared;
- u8 reserved[3];
-} __packed;
-
-/*
- * struct efi_visor_indication
- * @boot_to_fw_ui: Stop in UEFI UI
- * @clear_nvram: Clear NVRAM
- * @clear_cmos: Clear CMOS
- * @boot_to_tool: Run install tool
- * @reserved: Natural alignment
- */
-struct efi_visor_indication {
- u64 boot_to_fw_ui:1;
- u64 clear_nvram:1;
- u64 clear_cmos:1;
- u64 boot_to_tool:1;
- /* Remaining bits are available */
- u64 reserved:60;
-} __packed;
-
-enum visor_chipset_feature {
- VISOR_CHIPSET_FEATURE_REPLY = 0x00000001,
- VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
-};
-
-/*
- * struct controlvm_message_header
- * @id: See CONTROLVM_ID.
- * @message_size: Includes size of this struct + size of message.
- * @segment_index: Index of segment containing Vm message/information.
- * @completion_status: Error status code or result of message completion.
- * @struct flags:
- * @failed: =1 in a response to signify failure.
- * @response_expected: =1 in all messages that expect a response.
- * @server: =1 in all bus & device-related messages where the
- * message receiver is to act as the bus or device
- * server.
- * @test_message: =1 for testing use only (Control and Command
- * ignore this).
- * @partial_completion: =1 if there are forthcoming responses/acks
- * associated with this message.
- * @preserve: =1 this is to let us know to preserve channel
- * contents.
- * @writer_in_diag: =1 the DiagWriter is active in the Diagnostic
- * Partition.
- * @reserve: Natural alignment.
- * @reserved: Natural alignment.
- * @message_handle: Identifies the particular message instance.
- * @payload_vm_offset: Offset of payload area from start of this instance.
- * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm
- * segment.
- * @payload_bytes: Actual number of bytes of payload area to copy between
- * IO/Command. If non-zero, there is a payload to copy.
- *
- * This is the common structure that is at the beginning of every
- * ControlVm message (both commands and responses) in any ControlVm
- * queue. Commands are easily distinguished from responses by
- * looking at the flags.response field.
- */
-struct controlvm_message_header {
- u32 id;
- /*
- * For requests, indicates the message type. For responses, indicates
- * the type of message we are responding to.
- */
- u32 message_size;
- u32 segment_index;
- u32 completion_status;
- struct {
- u32 failed:1;
- u32 response_expected:1;
- u32 server:1;
- u32 test_message:1;
- u32 partial_completion:1;
- u32 preserve:1;
- u32 writer_in_diag:1;
- u32 reserve:25;
- } __packed flags;
- u32 reserved;
- u64 message_handle;
- u64 payload_vm_offset;
- u32 payload_max_bytes;
- u32 payload_bytes;
-} __packed;
-
-/*
- * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE
- * @bus_no: Bus # (0..n-1) from the msg receiver's end.
- * @dev_no: Bus-relative (0..n-1) device number.
- * @channel_addr: Guest physical address of the channel, which can be
- * dereferenced by the receiver of this ControlVm command.
- * @channel_bytes: Specifies size of the channel in bytes.
- * @data_type_uuid: Specifies format of data in channel.
- * @dev_inst_uuid: Instance guid for the device.
- * @irq_info intr: Specifies interrupt information.
- */
-struct controlvm_packet_device_create {
- u32 bus_no;
- u32 dev_no;
- u64 channel_addr;
- u64 channel_bytes;
- guid_t data_type_guid;
- guid_t dev_inst_guid;
- struct irq_info intr;
-} __packed;
-
-/*
- * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE
- * @bus_no: Bus number (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- */
-struct controlvm_packet_device_configure {
- u32 bus_no;
- u32 dev_no;
-} __packed;
-
-/* Total 128 bytes */
-struct controlvm_message_device_create {
- struct controlvm_message_header header;
- struct controlvm_packet_device_create packet;
-} __packed;
-
-/* Total 56 bytes */
-struct controlvm_message_device_configure {
- struct controlvm_message_header header;
- struct controlvm_packet_device_configure packet;
-} __packed;
-
-/*
- * struct controlvm_message_packet - This is the format for a message in any
- * ControlVm queue.
- * @struct create_bus: For CONTROLVM_BUS_CREATE.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_count: Indicates the max number of devices on this bus.
- * @channel_addr: Guest physical address of the channel, which can be
- * dereferenced by the receiver of this ControlVM
- * command.
- * @channel_bytes: Size of the channel.
- * @bus_data_type_uuid: Indicates format of data in bus channel.
- * @bus_inst_uuid: Instance uuid for the bus.
- *
- * @struct destroy_bus: For CONTROLVM_BUS_DESTROY.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @reserved: Natural alignment purposes.
- *
- * @struct configure_bus: For CONTROLVM_BUS_CONFIGURE.
- * @bus_no: Bus # (0..n-1) from the receiver's perspective.
- * @reserved1: For alignment purposes.
- * @guest_handle: This is used to convert guest physical address to
- * physical address.
- * @recv_bus_irq_handle: Specifies interrupt info. It is used by SP to
- * register to receive interrupts from the CP. This
- * interrupt is used for bus level notifications.
- * The corresponding sendBusInterruptHandle is kept
- * in CP.
- *
- * @struct create_device: For CONTROLVM_DEVICE_CREATE.
- *
- * @struct destroy_device: For CONTROLVM_DEVICE_DESTROY.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct configure_device: For CONTROLVM_DEVICE_CONFIGURE.
- *
- * @struct reconfigure_device: For CONTROLVM_DEVICE_RECONFIGURE.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct bus_change_state: For CONTROLVM_BUS_CHANGESTATE.
- * @bus_no:
- * @struct state:
- * @reserved: Natural alignment purposes.
- *
- * @struct device_change_state: For CONTROLVM_DEVICE_CHANGESTATE.
- * @bus_no:
- * @dev_no:
- * @struct state:
- * @struct flags:
- * @phys_device: =1 if message is for a physical device.
- * @reserved: Natural alignment.
- * @reserved1: Natural alignment.
- * @reserved: Natural alignment purposes.
- *
- * @struct device_change_state_event: For CONTROLVM_DEVICE_CHANGESTATE_EVENT.
- * @bus_no:
- * @dev_no:
- * @struct state:
- * @reserved: Natural alignment purposes.
- *
- * @struct init_chipset: For CONTROLVM_CHIPSET_INIT.
- * @bus_count: Indicates the max number of busses.
- * @switch_count: Indicates the max number of switches.
- * @enum features:
- * @platform_number:
- *
- * @struct chipset_selftest: For CONTROLVM_CHIPSET_SELFTEST.
- * @options: Reserved.
- * @test: Bit 0 set to run embedded selftest.
- *
- * @addr: A physical address of something, that can be dereferenced by the
- * receiver of this ControlVm command.
- *
- * @handle: A handle of something (depends on command id).
- */
-struct controlvm_message_packet {
- union {
- struct {
- u32 bus_no;
- u32 dev_count;
- u64 channel_addr;
- u64 channel_bytes;
- guid_t bus_data_type_guid;
- guid_t bus_inst_guid;
- } __packed create_bus;
- struct {
- u32 bus_no;
- u32 reserved;
- } __packed destroy_bus;
- struct {
- u32 bus_no;
- u32 reserved1;
- u64 guest_handle;
- u64 recv_bus_irq_handle;
- } __packed configure_bus;
- struct controlvm_packet_device_create create_device;
- struct {
- u32 bus_no;
- u32 dev_no;
- } __packed destroy_device;
- struct controlvm_packet_device_configure configure_device;
- struct {
- u32 bus_no;
- u32 dev_no;
- } __packed reconfigure_device;
- struct {
- u32 bus_no;
- struct visor_segment_state state;
- u8 reserved[2];
- } __packed bus_change_state;
- struct {
- u32 bus_no;
- u32 dev_no;
- struct visor_segment_state state;
- struct {
- u32 phys_device:1;
- u32 reserved:31;
- u32 reserved1;
- } __packed flags;
- u8 reserved[2];
- } __packed device_change_state;
- struct {
- u32 bus_no;
- u32 dev_no;
- struct visor_segment_state state;
- u8 reserved[6];
- } __packed device_change_state_event;
- struct {
- u32 bus_count;
- u32 switch_count;
- enum visor_chipset_feature features;
- u32 platform_number;
- } __packed init_chipset;
- struct {
- u32 options;
- u32 test;
- } __packed chipset_selftest;
- u64 addr;
- u64 handle;
- };
-} __packed;
-
-/* All messages in any ControlVm queue have this layout. */
-struct controlvm_message {
- struct controlvm_message_header hdr;
- struct controlvm_message_packet cmd;
-} __packed;
-
-/*
- * struct visor_controlvm_channel
- * @struct header:
- * @gp_controlvm: Guest phys addr of this channel.
- * @gp_partition_tables: Guest phys addr of partition tables.
- * @gp_diag_guest: Guest phys addr of diagnostic channel.
- * @gp_boot_romdisk: Guest phys addr of (read-only) Boot
- * ROM disk.
- * @gp_boot_ramdisk: Guest phys addr of writable Boot RAM
- * disk.
- * @gp_acpi_table: Guest phys addr of acpi table.
- * @gp_control_channel: Guest phys addr of control channel.
- * @gp_diag_romdisk: Guest phys addr of diagnostic ROM disk.
- * @gp_nvram: Guest phys addr of NVRAM channel.
- * @request_payload_offset: Offset to request payload area.
- * @event_payload_offset: Offset to event payload area.
- * @request_payload_bytes: Bytes available in request payload area.
- * @event_payload_bytes: Bytes available in event payload area.
- * @control_channel_bytes:
- * @nvram_channel_bytes: Bytes in PartitionNvram segment.
- * @message_bytes: sizeof(CONTROLVM_MESSAGE).
- * @message_count: CONTROLVM_MESSAGE_MAX.
- * @gp_smbios_table: Guest phys addr of SMBIOS tables.
- * @gp_physical_smbios_table: Guest phys addr of SMBIOS table.
- * @gp_reserved: VISOR_MAX_GUESTS_PER_SERVICE.
- * @virtual_guest_firmware_image_base: Guest physical address of EFI firmware
- * image base.
- * @virtual_guest_firmware_entry_point: Guest physical address of EFI firmware
- * entry point.
- * @virtual_guest_firmware_image_size: Guest EFI firmware image size.
- * @virtual_guest_firmware_boot_base: GPA = 1MB where EFI firmware image is
- * copied to.
- * @virtual_guest_image_base:
- * @virtual_guest_image_size:
- * @prototype_control_channel_offset:
- * @virtual_guest_partition_handle:
- * @restore_action: Restore Action field to restore the
- * guest partition.
- * @dump_action: For Windows guests it shows if the
- * visordisk is in dump mode.
- * @nvram_fail_count:
- * @saved_crash_message_count: = CONTROLVM_CRASHMSG_MAX.
- * @saved_crash_message_offset: Offset to request payload area needed
- * for crash dump.
- * @installation_error: Type of error encountered during
- * installation.
- * @installation_text_id: Id of string to display.
- * @installation_remaining_steps: Number of remaining installation steps
- * (for progress bars).
- * @tool_action: VISOR_TOOL_ACTIONS Installation Action
- * field.
- * @reserved: Alignment.
- * @struct efi_visor_ind:
- * @sp_reserved:
- * @reserved2: Force signals to begin on 128-byte
- * cache line.
- * @struct request_queue: Guest partition uses this queue to send
- * requests to Control.
- * @struct response_queue: Control uses this queue to respond to
- * service or guest partition request.
- * @struct event_queue: Control uses this queue to send events
- * to guest partition.
- * @struct event_ack_queue: Service or guest partition uses this
- * queue to ack Control events.
- * @struct request_msg: Request fixed-size message pool -
- * does not include payload.
- * @struct response_msg: Response fixed-size message pool -
- * does not include payload.
- * @struct event_msg: Event fixed-size message pool -
- * does not include payload.
- * @struct event_ack_msg: Ack fixed-size message pool -
- * does not include payload.
- * @struct saved_crash_msg: Message stored during IOVM creation to
- * be reused after crash.
- */
-struct visor_controlvm_channel {
- struct channel_header header;
- u64 gp_controlvm;
- u64 gp_partition_tables;
- u64 gp_diag_guest;
- u64 gp_boot_romdisk;
- u64 gp_boot_ramdisk;
- u64 gp_acpi_table;
- u64 gp_control_channel;
- u64 gp_diag_romdisk;
- u64 gp_nvram;
- u64 request_payload_offset;
- u64 event_payload_offset;
- u32 request_payload_bytes;
- u32 event_payload_bytes;
- u32 control_channel_bytes;
- u32 nvram_channel_bytes;
- u32 message_bytes;
- u32 message_count;
- u64 gp_smbios_table;
- u64 gp_physical_smbios_table;
- char gp_reserved[2688];
- u64 virtual_guest_firmware_image_base;
- u64 virtual_guest_firmware_entry_point;
- u64 virtual_guest_firmware_image_size;
- u64 virtual_guest_firmware_boot_base;
- u64 virtual_guest_image_base;
- u64 virtual_guest_image_size;
- u64 prototype_control_channel_offset;
- u64 virtual_guest_partition_handle;
- u16 restore_action;
- u16 dump_action;
- u16 nvram_fail_count;
- u16 saved_crash_message_count;
- u32 saved_crash_message_offset;
- u32 installation_error;
- u32 installation_text_id;
- u16 installation_remaining_steps;
- u8 tool_action;
- u8 reserved;
- struct efi_visor_indication efi_visor_ind;
- u32 sp_reserved;
- u8 reserved2[28];
- struct signal_queue_header request_queue;
- struct signal_queue_header response_queue;
- struct signal_queue_header event_queue;
- struct signal_queue_header event_ack_queue;
- struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
-} __packed;
-
-/*
- * struct visor_controlvm_parameters_header
- *
- * The following header will be located at the beginning of PayloadVmOffset for
- * various ControlVm commands. The receiver of a ControlVm command with a
- * PayloadVmOffset will dereference this address and then use connection_offset,
- * initiator_offset, and target_offset to get the location of UTF-8 formatted
- * strings that can be parsed to obtain command-specific information. The value
- * of total_length should equal PayloadBytes. The format of the strings at
- * PayloadVmOffset will take different forms depending on the message.
- */
-struct visor_controlvm_parameters_header {
- u32 total_length;
- u32 header_length;
- u32 connection_offset;
- u32 connection_length;
- u32 initiator_offset;
- u32 initiator_length;
- u32 target_offset;
- u32 target_length;
- u32 client_offset;
- u32 client_length;
- u32 name_offset;
- u32 name_length;
- guid_t id;
- u32 revision;
- /* Natural alignment */
- u32 reserved;
-} __packed;
-
-/* General Errors------------------------------------------------------[0-99] */
-#define CONTROLVM_RESP_SUCCESS 0
-#define CONTROLVM_RESP_ALREADY_DONE 1
-#define CONTROLVM_RESP_IOREMAP_FAILED 2
-#define CONTROLVM_RESP_KMALLOC_FAILED 3
-#define CONTROLVM_RESP_ID_UNKNOWN 4
-#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5
-/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
-#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100
-#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101
-/* Maximum Limit----------------------------------------------------[200-299] */
-/* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES 201
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202
-/* Payload and Parameter Related------------------------------------[400-499] */
-/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_PAYLOAD_INVALID 400
-/* Multiple */
-#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403
-/* Specified[Packet Structure] Value--------------------------------[500-599] */
-/* SWITCH_ATTACHINTPORT */
-/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */
-#define CONTROLVM_RESP_BUS_INVALID 500
-/* SWITCH_ATTACHINTPORT*/
-/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_DEVICE_INVALID 501
-/* DEVICE_CREATE, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CHANNEL_INVALID 502
-/* Partition Driver Callback Interface------------------------------[600-699] */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604
-/* Unable to invoke VIRTPCI callback. VIRTPCI Callback returned error. */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605
-/* Generic device callback returned error. */
-/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606
-/* Bus Related------------------------------------------------------[700-799] */
-/* BUS_DESTROY */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700
-/* Channel Related--------------------------------------------------[800-899] */
-/* GET_CHANNELINFO, DEVICE_DESTROY */
-#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801
-/* Chipset Shutdown Related---------------------------------------[1000-1099] */
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
-/* Chipset Stop Related-------------------------------------------[1100-1199] */
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101
-/* Device Related-------------------------------------------------[1400-1499] */
-#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400
-
-/* __CONTROLVMCHANNEL_H__ */
-#endif
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
deleted file mode 100644
index 98711fb6d66e..000000000000
--- a/drivers/visorbus/vbuschannel.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VBUSCHANNEL_H__
-#define __VBUSCHANNEL_H__
-
-/*
- * The vbus channel is the channel area provided via the BUS_CREATE controlvm
- * message for each virtual bus. This channel area is provided to both server
- * and client ends of the bus. The channel header area is initialized by
- * the server, and the remaining information is filled in by the client.
- * We currently use this for the client to provide various information about
- * the client devices and client drivers for the server end to see.
- */
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define VISOR_VBUS_CHANNEL_GUID \
- GUID_INIT(0x193b331b, 0xc58f, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_VBUS_CHANNEL_VERSIONID 1
-
-/*
- * struct visor_vbus_deviceinfo
- * @devtype: Short string identifying the device type.
- * @drvname: Driver .sys file name.
- * @infostrs: Kernel version.
- * @reserved: Pad size to 256 bytes.
- *
- * An array of this struct is present in the channel area for each vbus. It is
- * filled in by the client side to provide info about the device and driver from
- * the client's perspective.
- */
-struct visor_vbus_deviceinfo {
- u8 devtype[16];
- u8 drvname[16];
- u8 infostrs[96];
- u8 reserved[128];
-} __packed;
-
-/*
- * struct visor_vbus_headerinfo
- * @struct_bytes: Size of this struct in bytes.
- * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO.
- * @dev_info_count: Num of items in DevInfo member. This is the
- * allocated size.
- * @chp_info_offset: Byte offset from beginning of this struct to the
- * ChpInfo struct.
- * @bus_info_offset: Byte offset from beginning of this struct to the
- * BusInfo struct.
- * @dev_info_offset: Byte offset from beginning of this struct to the
- * DevInfo array.
- * @reserved: Natural alignment.
- */
-struct visor_vbus_headerinfo {
- u32 struct_bytes;
- u32 device_info_struct_bytes;
- u32 dev_info_count;
- u32 chp_info_offset;
- u32 bus_info_offset;
- u32 dev_info_offset;
- u8 reserved[104];
-} __packed;
-
-/*
- * struct visor_vbus_channel
- * @channel_header: Initialized by server.
- * @hdr_info: Initialized by server.
- * @chp_info: Describes client chipset device and driver.
- * @bus_info: Describes client bus device and driver.
- * @dev_info: Describes client device and driver for each device on the
- * bus.
- */
-struct visor_vbus_channel {
- struct channel_header channel_header;
- struct visor_vbus_headerinfo hdr_info;
- struct visor_vbus_deviceinfo chp_info;
- struct visor_vbus_deviceinfo bus_info;
- struct visor_vbus_deviceinfo dev_info[];
-} __packed;
-
-#endif
diff --git a/drivers/visorbus/visorbus_main.c b/drivers/visorbus/visorbus_main.c
deleted file mode 100644
index 152fd29f04f2..000000000000
--- a/drivers/visorbus/visorbus_main.c
+++ /dev/null
@@ -1,1234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-#include <linux/uuid.h>
-
-#include "visorbus_private.h"
-
-static const guid_t visor_vbus_channel_guid = VISOR_VBUS_CHANNEL_GUID;
-
-/* Display string that is guaranteed to be no longer than 99 characters */
-#define LINESIZE 99
-#define POLLJIFFIES_NORMALCHANNEL 10
-
-/* stores whether bus_registration was successful */
-static bool initialized;
-static struct dentry *visorbus_debugfs_dir;
-
-/*
- * DEVICE type attributes
- *
- * The modalias file will contain the guid of the device.
- */
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev;
- const guid_t *guid;
-
- vdev = to_visor_device(dev);
- guid = visorchannel_get_guid(vdev->visorchannel);
- return sprintf(buf, "visorbus:%pUl\n", guid);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *visorbus_dev_attrs[] = {
- &dev_attr_modalias.attr,
- NULL,
-};
-
-ATTRIBUTE_GROUPS(visorbus_dev);
-
-/* filled in with info about parent chipset driver when we register with it */
-static struct visor_vbus_deviceinfo chipset_driverinfo;
-/* filled in with info about this driver, wrt it servicing client busses */
-static struct visor_vbus_deviceinfo clientbus_driverinfo;
-
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_bus_instances);
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_device_instances);
-
-/*
- * Generic function useful for validating any type of channel when it is
- * received by the client that will be accessing the channel.
- * Note that <logCtx> is only needed for callers in the EFI environment, and
- * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
- */
-int visor_check_channel(struct channel_header *ch, struct device *dev,
- const guid_t *expected_guid, char *chname,
- u64 expected_min_bytes, u32 expected_version,
- u64 expected_signature)
-{
- if (!guid_is_null(expected_guid)) {
- /* caller wants us to verify type GUID */
- if (!guid_equal(&ch->chtype, expected_guid)) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
- chname, expected_guid, expected_guid,
- &ch->chtype);
- return 0;
- }
- }
- /* verify channel size */
- if (expected_min_bytes > 0) {
- if (ch->size < expected_min_bytes) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, expected_guid,
- (unsigned long long)expected_min_bytes,
- ch->size);
- return 0;
- }
- }
- /* verify channel version */
- if (expected_version > 0) {
- if (ch->version_id != expected_version) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
- chname, expected_guid,
- (unsigned long)expected_version,
- ch->version_id);
- return 0;
- }
- }
- /* verify channel signature */
- if (expected_signature > 0) {
- if (ch->signature != expected_signature) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, expected_guid, expected_signature,
- ch->signature);
- return 0;
- }
- }
- return 1;
-}
-
-static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
-{
- struct visor_device *dev;
- const guid_t *guid;
-
- dev = to_visor_device(xdev);
- guid = visorchannel_get_guid(dev->visorchannel);
- return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
-}
-
-/*
- * visorbus_match() - called automatically upon adding a visor_device
- * (device_add), or adding a visor_driver
- * (visorbus_register_visor_driver)
- * @xdev: struct device for the device being matched
- * @xdrv: struct device_driver for driver to match device against
- *
- * Return: 1 iff the provided driver can control the specified device
- */
-static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
-{
- const guid_t *channel_type;
- int i;
- struct visor_device *dev;
- struct visor_driver *drv;
- struct visorchannel *chan;
-
- dev = to_visor_device(xdev);
- channel_type = visorchannel_get_guid(dev->visorchannel);
- drv = to_visor_driver(xdrv);
- chan = dev->visorchannel;
- if (!drv->channel_types)
- return 0;
- for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
- if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
- visor_check_channel(visorchannel_get_header(chan),
- xdev,
- &drv->channel_types[i].guid,
- (char *)drv->channel_types[i].name,
- drv->channel_types[i].min_bytes,
- drv->channel_types[i].version,
- VISOR_CHANNEL_SIGNATURE))
- return i + 1;
- return 0;
-}
-
-/*
- * This describes the TYPE of bus.
- * (Don't confuse this with an INSTANCE of the bus.)
- */
-static struct bus_type visorbus_type = {
- .name = "visorbus",
- .match = visorbus_match,
- .uevent = visorbus_uevent,
- .dev_groups = visorbus_dev_groups,
-};
-
-struct visor_busdev {
- u32 bus_no;
- u32 dev_no;
-};
-
-static int match_visorbus_dev_by_id(struct device *dev, const void *data)
-{
- struct visor_device *vdev = to_visor_device(dev);
- const struct visor_busdev *id = data;
-
- if (vdev->chipset_bus_no == id->bus_no &&
- vdev->chipset_dev_no == id->dev_no)
- return 1;
- return 0;
-}
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from)
-{
- struct device *dev;
- struct device *dev_start = NULL;
- struct visor_busdev id = {
- .bus_no = bus_no,
- .dev_no = dev_no
- };
-
- if (from)
- dev_start = &from->device;
- dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
- match_visorbus_dev_by_id);
- if (!dev)
- return NULL;
- return to_visor_device(dev);
-}
-
-/*
- * visorbus_release_busdevice() - called when device_unregister() is called for
- * the bus device instance, after all other tasks
- * involved with destroying the dev are complete
- * @xdev: struct device for the bus being released
- */
-static void visorbus_release_busdevice(struct device *xdev)
-{
- struct visor_device *dev = dev_get_drvdata(xdev);
-
- debugfs_remove(dev->debugfs_bus_info);
- debugfs_remove_recursive(dev->debugfs_dir);
- visorchannel_destroy(dev->visorchannel);
- kfree(dev);
-}
-
-/*
- * visorbus_release_device() - called when device_unregister() is called for
- * each child device instance
- * @xdev: struct device for the visor device being released
- */
-static void visorbus_release_device(struct device *xdev)
-{
- struct visor_device *dev = to_visor_device(xdev);
-
- visorchannel_destroy(dev->visorchannel);
- kfree(dev);
-}
-
-/*
- * BUS specific channel attributes to appear under
- * /sys/bus/visorbus<x>/dev<y>/channel
- */
-
-static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%llx\n",
- visorchannel_get_physaddr(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(physaddr);
-
-static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%lx\n",
- visorchannel_get_nbytes(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(nbytes);
-
-static ssize_t clientpartition_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%llx\n",
- visorchannel_get_clientpartition(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(clientpartition);
-
-static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- char typeid[LINESIZE];
-
- return sprintf(buf, "%s\n",
- visorchannel_id(vdev->visorchannel, typeid));
-}
-static DEVICE_ATTR_RO(typeguid);
-
-static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- char zoneid[LINESIZE];
-
- return sprintf(buf, "%s\n",
- visorchannel_zoneid(vdev->visorchannel, zoneid));
-}
-static DEVICE_ATTR_RO(zoneguid);
-
-static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int i = 0;
- struct bus_type *xbus = dev->bus;
- struct device_driver *xdrv = dev->driver;
- struct visor_driver *drv = NULL;
-
- if (!xdrv)
- return 0;
- i = xbus->match(dev, xdrv);
- if (!i)
- return 0;
- drv = to_visor_driver(xdrv);
- return sprintf(buf, "%s\n", drv->channel_types[i - 1].name);
-}
-static DEVICE_ATTR_RO(typename);
-
-static struct attribute *channel_attrs[] = {
- &dev_attr_physaddr.attr,
- &dev_attr_nbytes.attr,
- &dev_attr_clientpartition.attr,
- &dev_attr_typeguid.attr,
- &dev_attr_zoneguid.attr,
- &dev_attr_typename.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(channel);
-
-/*
- * BUS instance attributes
- *
- * define & implement display of bus attributes under
- * /sys/bus/visorbus/devices/visorbus<n>.
- */
-static ssize_t partition_handle_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", handle);
-}
-static DEVICE_ATTR_RO(partition_handle);
-
-static ssize_t partition_guid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "{%pUb}\n", &vdev->partition_guid);
-}
-static DEVICE_ATTR_RO(partition_guid);
-
-static ssize_t partition_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "%s\n", vdev->name);
-}
-static DEVICE_ATTR_RO(partition_name);
-
-static ssize_t channel_addr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", addr);
-}
-static DEVICE_ATTR_RO(channel_addr);
-
-static ssize_t channel_bytes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", nbytes);
-}
-static DEVICE_ATTR_RO(channel_bytes);
-
-static ssize_t channel_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- int len = 0;
-
- visorchannel_id(vdev->visorchannel, buf);
- len = strlen(buf);
- buf[len++] = '\n';
- return len;
-}
-static DEVICE_ATTR_RO(channel_id);
-
-static struct attribute *visorbus_attrs[] = {
- &dev_attr_partition_handle.attr,
- &dev_attr_partition_guid.attr,
- &dev_attr_partition_name.attr,
- &dev_attr_channel_addr.attr,
- &dev_attr_channel_bytes.attr,
- &dev_attr_channel_id.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(visorbus);
-
-/*
- * BUS debugfs entries
- *
- * define & implement display of debugfs attributes under
- * /sys/kernel/debug/visorbus/visorbus<n>.
- */
-
-/*
- * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
- * and write it to a seq_file
- * @devinfo: the struct visor_vbus_deviceinfo to format
- * @seq: seq_file to write to
- * @devix: the device index to be included in the output data, or -1 if no
- * device index is to be included
- *
- * Reads @devInfo, and writes it in human-readable notation to @seq.
- */
-static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
- struct seq_file *seq, int devix)
-{
- /* uninitialized vbus device entry */
- if (!isprint(devinfo->devtype[0]))
- return;
- if (devix >= 0)
- seq_printf(seq, "[%d]", devix);
- else
- /* vbus device entry is for bus or chipset */
- seq_puts(seq, " ");
- /*
- * Note: because the s-Par back-end is free to scribble in this area,
- * we never assume '\0'-termination.
- */
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
- (int)sizeof(devinfo->devtype), devinfo->devtype);
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
- (int)sizeof(devinfo->drvname), devinfo->drvname);
- seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
- devinfo->infostrs);
-}
-
-static int bus_info_debugfs_show(struct seq_file *seq, void *v)
-{
- int i = 0;
- unsigned long off;
- struct visor_vbus_deviceinfo dev_info;
- struct visor_device *vdev = seq->private;
- struct visorchannel *channel = vdev->visorchannel;
-
- if (!channel)
- return 0;
-
- seq_printf(seq,
- "Client device/driver info for %s partition (vbus #%u):\n",
- ((vdev->name) ? (char *)(vdev->name) : ""),
- vdev->chipset_bus_no);
- if (visorchannel_read(channel,
- offsetof(struct visor_vbus_channel, chp_info),
- &dev_info, sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, -1);
- if (visorchannel_read(channel,
- offsetof(struct visor_vbus_channel, bus_info),
- &dev_info, sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, -1);
-
- off = offsetof(struct visor_vbus_channel, dev_info);
- while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
- if (visorchannel_read(channel, off, &dev_info,
- sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, i);
- off += sizeof(dev_info);
- i++;
- }
- return 0;
-}
-
-static int bus_info_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bus_info_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations bus_info_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = bus_info_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void dev_periodic_work(struct timer_list *t)
-{
- struct visor_device *dev = from_timer(dev, t, timer);
- struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
- drv->channel_interrupt(dev);
- mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
-}
-
-static int dev_start_periodic_work(struct visor_device *dev)
-{
- if (dev->being_removed || dev->timer_active)
- return -EINVAL;
-
- /* now up by at least 2 */
- get_device(&dev->device);
- dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
- add_timer(&dev->timer);
- dev->timer_active = true;
- return 0;
-}
-
-static void dev_stop_periodic_work(struct visor_device *dev)
-{
- if (!dev->timer_active)
- return;
-
- del_timer_sync(&dev->timer);
- dev->timer_active = false;
- put_device(&dev->device);
-}
-
-/*
- * visordriver_remove_device() - handle visor device going away
- * @xdev: struct device for the visor device being removed
- *
- * This is called when device_unregister() is called for each child device
- * instance, to notify the appropriate visorbus function driver that the device
- * is going away, and to decrease the reference count of the device.
- *
- * Return: 0 iff successful
- */
-static int visordriver_remove_device(struct device *xdev)
-{
- struct visor_device *dev = to_visor_device(xdev);
- struct visor_driver *drv = to_visor_driver(xdev->driver);
-
- mutex_lock(&dev->visordriver_callback_lock);
- dev->being_removed = true;
- drv->remove(dev);
- mutex_unlock(&dev->visordriver_callback_lock);
- dev_stop_periodic_work(dev);
- put_device(&dev->device);
- return 0;
-}
-
-/*
- * visorbus_unregister_visor_driver() - unregisters the provided driver
- * @drv: the driver to unregister
- *
- * A visor function driver calls this function to unregister the driver,
- * i.e., within its module_exit function.
- */
-void visorbus_unregister_visor_driver(struct visor_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
-
-/*
- * visorbus_read_channel() - reads from the designated channel into
- * the provided buffer
- * @dev: the device whose channel is read from
- * @offset: the offset into the channel at which reading starts
- * @dest: the destination buffer that is written into from the channel
- * @nbytes: the number of bytes to read from the channel
- *
- * If receiving a message, use the visorchannel_signalremove() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_read_channel(struct visor_device *dev, unsigned long offset,
- void *dest, unsigned long nbytes)
-{
- return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_read_channel);
-
-/*
- * visorbus_write_channel() - writes the provided buffer into the designated
- * channel
- * @dev: the device whose channel is written to
- * @offset: the offset into the channel at which writing starts
- * @src: the source buffer that is written into the channel
- * @nbytes: the number of bytes to write into the channel
- *
- * If sending a message, use the visorchannel_signalinsert() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_write_channel(struct visor_device *dev, unsigned long offset,
- void *src, unsigned long nbytes)
-{
- return visorchannel_write(dev->visorchannel, offset, src, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_write_channel);
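For illustration only (not part of this patch): a function driver would typically use these helpers to access device-specific data that lives in its channel after the generic channel_header. "struct my_chan_hdr" and my_read_hdr() below are assumptions made for the sketch; only visorbus_read_channel()/visorbus_write_channel() are the real API shown above.

/*
 * Hypothetical sketch: read a device-specific header that a function
 * driver keeps immediately after the generic channel_header.
 */
struct my_chan_hdr {
	u32 version;
	u32 features;
};

static int my_read_hdr(struct visor_device *dev, struct my_chan_hdr *hdr)
{
	return visorbus_read_channel(dev, sizeof(struct channel_header),
				     hdr, sizeof(*hdr));
}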
-
-/*
- * visorbus_enable_channel_interrupts() - enables interrupts on the
- * designated device
- * @dev: the device on which to enable interrupts
- *
- * We don't yet have a real interrupt, so for now we simply call the
- * interrupt function periodically via a timer.
- */
-int visorbus_enable_channel_interrupts(struct visor_device *dev)
-{
- struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
- if (!drv->channel_interrupt) {
- dev_err(&dev->device, "%s no interrupt function!\n", __func__);
- return -ENOENT;
- }
-
- return dev_start_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
-
-/*
- * visorbus_disable_channel_interrupts() - disables interrupts on the
- * designated device
- * @dev: the device on which to disable interrupts
- */
-void visorbus_disable_channel_interrupts(struct visor_device *dev)
-{
- dev_stop_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
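As the comments above note, channel "interrupts" are really a polling timer that invokes the driver's channel_interrupt callback. A minimal sketch of how a function driver might hook into this follows; the my_* callbacks are hypothetical, and only the enable/disable calls are the real visorbus API.

static void my_channel_interrupt(struct visor_device *dev)
{
	/* drain any pending signals from the channel here */
}

static int my_probe(struct visor_device *dev)
{
	/* start the polled "interrupt"; requires channel_interrupt to be set */
	return visorbus_enable_channel_interrupts(dev);
}

static void my_remove(struct visor_device *dev)
{
	visorbus_disable_channel_interrupts(dev);
}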
-
-/*
- * create_visor_device() - create visor device as a result of receiving the
- * controlvm device_create message for a new device
- * @dev: a freshly-zeroed struct visor_device, containing only filled-in values
- * for chipset_bus_no and chipset_dev_no, that will be initialized
- *
- * This is how everything starts from the device end.
- * This function is called when a channel first appears via a ControlVM
- * message. In response, this function allocates a visor_device to correspond
- * to the new channel, and attempts to connect it to the appropriate driver. If
- * the appropriate driver is found, the visor_driver.probe() function for that
- * driver will be called, and will be passed the new visor_device that we
- * just created.
- *
- * It's ok if the appropriate driver is not yet loaded, because in that case
- * the new device struct will just stick around in the bus' list of devices.
- * When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new device.
- *
- * Return: 0 if successful, otherwise the negative value returned by
- * device_add() indicating the reason for failure
- */
-int create_visor_device(struct visor_device *dev)
-{
- int err;
- u32 chipset_bus_no = dev->chipset_bus_no;
- u32 chipset_dev_no = dev->chipset_dev_no;
-
- mutex_init(&dev->visordriver_callback_lock);
- dev->device.bus = &visorbus_type;
- dev->device.groups = channel_groups;
- device_initialize(&dev->device);
- dev->device.release = visorbus_release_device;
- /* keep a reference just for us (now 2) */
- get_device(&dev->device);
- timer_setup(&dev->timer, dev_periodic_work, 0);
- /*
- * bus_id must be a unique name with respect to this bus TYPE (NOT bus
- * instance). That's why we need to include the bus number within the
- * name.
- */
- err = dev_set_name(&dev->device, "vbus%u:dev%u",
- chipset_bus_no, chipset_dev_no);
- if (err)
- goto err_put;
- /*
- * device_add does this:
- * bus_add_device(dev)
- * ->device_attach(dev)
- * ->for each driver drv registered on the bus that dev is on
- * if (dev.drv) ** device already has a driver **
- * ** not sure we could ever get here... **
- * else
- * if (bus.match(dev,drv)) [visorbus_match]
- * dev.drv = drv
- * if (!drv.probe(dev)) [visordriver_probe_device]
- * dev.drv = NULL
- *
- * Note that device_add does NOT fail even if no driver claims the
- * device. The device will be linked onto bus_type.klist_devices
- * regardless (use bus_for_each_dev).
- */
- err = device_add(&dev->device);
- if (err < 0)
- goto err_put;
- list_add_tail(&dev->list_all, &list_all_device_instances);
- dev->state.created = 1;
- visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
- /* success: reference kept via unmatched get_device() */
- return 0;
-
-err_put:
- put_device(&dev->device);
- dev_err(&dev->device, "Creating visor device failed. %d\n", err);
- return err;
-}
-
-void remove_visor_device(struct visor_device *dev)
-{
- list_del(&dev->list_all);
- put_device(&dev->device);
- if (dev->pending_msg_hdr)
- visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
- device_unregister(&dev->device);
-}
-
-static int get_vbus_header_info(struct visorchannel *chan,
- struct device *dev,
- struct visor_vbus_headerinfo *hdr_info)
-{
- int err;
-
- if (!visor_check_channel(visorchannel_get_header(chan),
- dev,
- &visor_vbus_channel_guid,
- "vbus",
- sizeof(struct visor_vbus_channel),
- VISOR_VBUS_CHANNEL_VERSIONID,
- VISOR_CHANNEL_SIGNATURE))
- return -EINVAL;
-
- err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
- sizeof(*hdr_info));
- if (err < 0)
- return err;
- if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
- return -EINVAL;
- if (hdr_info->device_info_struct_bytes <
- sizeof(struct visor_vbus_deviceinfo))
- return -EINVAL;
- return 0;
-}
-
-/*
- * write_vbus_chp_info() - write the contents of <info> to the struct
- * visor_vbus_channel.chp_info
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- *
- * Writes chipset info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_chp_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
-{
- int off;
-
- if (hdr_info->chp_info_offset == 0)
- return;
-
- off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_bus_info() - write the contents of <info> to the struct
- * visor_vbus_channel.bus_info
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- *
- * Writes bus info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_bus_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
-{
- int off;
-
- if (hdr_info->bus_info_offset == 0)
- return;
-
- off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_dev_info() - write the contents of <info> to the struct
- * visor_vbus_channel.dev_info[<devix>]
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- * @devix: the relative device number (0..n-1) of the device on the bus
- *
- * Writes device info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_dev_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info,
- unsigned int devix)
-{
- int off;
-
- if (hdr_info->dev_info_offset == 0)
- return;
- off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
- (hdr_info->device_info_struct_bytes * devix);
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-static void bus_device_info_init(
- struct visor_vbus_deviceinfo *bus_device_info_ptr,
- const char *dev_type, const char *drv_name)
-{
- memset(bus_device_info_ptr, 0, sizeof(struct visor_vbus_deviceinfo));
- snprintf(bus_device_info_ptr->devtype,
- sizeof(bus_device_info_ptr->devtype),
- "%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvname,
- sizeof(bus_device_info_ptr->drvname),
- "%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infostrs,
- sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
- utsname()->release);
-}
-
-/*
- * publish_vbus_dev_info() - for a child device just created on a client bus,
- * fill in information about the driver that is
- * controlling this device into the appropriate slot
- * within the vbus channel of the bus instance
- * @visordev: struct visor_device for the desired device
- */
-static void publish_vbus_dev_info(struct visor_device *visordev)
-{
- int i;
- struct visor_device *bdev;
- struct visor_driver *visordrv;
- u32 bus_no = visordev->chipset_bus_no;
- u32 dev_no = visordev->chipset_dev_no;
- struct visor_vbus_deviceinfo dev_info;
- const char *chan_type_name = NULL;
- struct visor_vbus_headerinfo *hdr_info;
-
- if (!visordev->device.driver)
- return;
- bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bdev)
- return;
- hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
- if (!hdr_info)
- return;
- visordrv = to_visor_driver(visordev->device.driver);
-
- /*
- * Within the list of device types (by GUID) that the driver
- * says it supports, find out which one of those types matches
- * the type of this device, so that we can include the device
- * type name
- */
- for (i = 0; visordrv->channel_types[i].name; i++) {
- if (guid_equal(&visordrv->channel_types[i].guid,
- &visordev->channel_type_guid)) {
- chan_type_name = visordrv->channel_types[i].name;
- break;
- }
- }
- bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
- write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
- write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
- write_vbus_bus_info(bdev->visorchannel, hdr_info,
- &clientbus_driverinfo);
-}
-
-/*
- * visordriver_probe_device() - handle new visor device coming online
- * @xdev: struct device for the visor device being probed
- *
- * This is called automatically upon adding a visor_device (device_add), or
- * adding a visor_driver (visorbus_register_visor_driver), but only after
- * visorbus_match() has returned 1 to indicate a successful match between
- * driver and device.
- *
- * If successful, a reference to the device will be held onto via get_device().
- *
- * Return: 0 if successful, meaning the function driver's probe() function
- * was successful with this device, otherwise a negative errno
- * value indicating failure reason
- */
-static int visordriver_probe_device(struct device *xdev)
-{
- int err;
- struct visor_driver *drv = to_visor_driver(xdev->driver);
- struct visor_device *dev = to_visor_device(xdev);
-
- mutex_lock(&dev->visordriver_callback_lock);
- dev->being_removed = false;
- err = drv->probe(dev);
- if (err) {
- mutex_unlock(&dev->visordriver_callback_lock);
- return err;
- }
- /* success: reference kept via unmatched get_device() */
- get_device(&dev->device);
- publish_vbus_dev_info(dev);
- mutex_unlock(&dev->visordriver_callback_lock);
- return 0;
-}
-
-/*
- * visorbus_register_visor_driver() - registers the provided visor driver for
- * handling one or more visor device
- * types (channel_types)
- * @drv: the driver to register
- *
- * A visor function driver calls this function to register the driver. The
- * caller MUST fill in the following fields within the #drv structure:
- * name, version, owner, channel_types, probe, remove, pause, resume
- *
- * Here's how the whole Linux bus / driver / device model works.
- *
- * At system start-up, the visorbus kernel module is loaded, which registers
- * visorbus_type as a bus type, using bus_register().
- *
- * All kernel modules that support particular device types on a
- * visorbus bus are loaded. Each of these kernel modules calls
- * visorbus_register_visor_driver() in their init functions, passing a
- * visor_driver struct. visorbus_register_visor_driver() in turn calls
- * register_driver(&visor_driver.driver). This .driver member is
- * initialized with generic methods (like probe), whose sole responsibility
- * is to act as a broker for the real methods, which are within the
- * visor_driver struct. (This is the way the subclass behavior is
- * implemented, since visor_driver is essentially a subclass of the
- * generic driver.) Whenever a driver_register() happens, core bus code in
- * the kernel does (see device_attach() in drivers/base/dd.c):
- *
- * for each dev associated with the bus (the bus that driver is on) that
- * does not yet have a driver
- * if bus.match(dev,newdriver) == yes_matched ** .match specified
- * ** during bus_register().
- * newdriver.probe(dev) ** for visor drivers, this will call
- * ** the generic driver.probe implemented in visorbus.c,
- * ** which in turn calls the probe specified within the
- * ** struct visor_driver (which was specified by the
- * ** actual device driver as part of
- * ** visorbus_register_visor_driver()).
- *
- * The above dance also happens when a new device appears.
- * So the question is, how are devices created within the system?
- * Basically, just call device_add(dev). See pci_bus_add_devices().
- * pci_scan_device() shows an example of how to build a device struct. It
- * returns the newly-created struct to pci_scan_single_device(), who adds it
- * to the list of devices at PCIBUS.devices. That list of devices is what
- * is traversed by pci_bus_add_devices().
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_register_visor_driver(struct visor_driver *drv)
-{
- /* can't register on a nonexistent bus */
- if (!initialized)
- return -ENODEV;
- if (!drv->probe)
- return -EINVAL;
- if (!drv->remove)
- return -EINVAL;
- if (!drv->pause)
- return -EINVAL;
- if (!drv->resume)
- return -EINVAL;
-
- drv->driver.name = drv->name;
- drv->driver.bus = &visorbus_type;
- drv->driver.probe = visordriver_probe_device;
- drv->driver.remove = visordriver_remove_device;
- drv->driver.owner = drv->owner;
- /*
- * driver_register does this:
- * bus_add_driver(drv)
- * ->if (drv.bus) ** (bus_type) **
- * driver_attach(drv)
- * for each dev with bus type of drv.bus
- * if (!dev.drv) ** no driver assigned yet **
- * if (bus.match(dev,drv)) [visorbus_match]
- * dev.drv = drv
- * if (!drv.probe(dev)) [visordriver_probe_device]
- * dev.drv = NULL
- */
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
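To make the registration contract above concrete, here is a minimal, hypothetical function-driver skeleton. Every name prefixed with my_ (and MY_CHANNEL_GUID) is an assumption for illustration; the required fields mirror the NULL checks in visorbus_register_visor_driver(), and the channel_types table is name-terminated because publish_vbus_dev_info() iterates it until .name is NULL.

static struct visor_channeltype_descriptor my_channel_types[] = {
	{ .guid = MY_CHANNEL_GUID, .name = "mychannel" }, /* hypothetical GUID */
	{} /* terminating entry */
};

static struct visor_driver my_driver = {
	.name = "my_visor_driver",
	.owner = THIS_MODULE,
	.channel_types = my_channel_types,
	.probe = my_probe,
	.remove = my_remove,
	.pause = my_pause,
	.resume = my_resume,
	.channel_interrupt = my_channel_interrupt,
};

static int __init my_init(void)
{
	return visorbus_register_visor_driver(&my_driver);
}

static void __exit my_exit(void)
{
	visorbus_unregister_visor_driver(&my_driver);
}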
-
-/*
- * visorbus_create_instance() - create a device instance for the visorbus itself
- * @dev: struct visor_device indicating the bus instance
- *
- * Return: 0 for success, otherwise negative errno value indicating reason for
- * failure
- */
-int visorbus_create_instance(struct visor_device *dev)
-{
- int id = dev->chipset_bus_no;
- int err;
- struct visor_vbus_headerinfo *hdr_info;
-
- hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
- if (!hdr_info)
- return -ENOMEM;
- dev_set_name(&dev->device, "visorbus%d", id);
- dev->device.bus = &visorbus_type;
- dev->device.groups = visorbus_groups;
- dev->device.release = visorbus_release_busdevice;
- dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
- visorbus_debugfs_dir);
- dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
- dev->debugfs_dir, dev,
- &bus_info_debugfs_fops);
- dev_set_drvdata(&dev->device, dev);
- err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
- if (err < 0)
- goto err_debugfs_dir;
- err = device_register(&dev->device);
- if (err < 0)
- goto err_debugfs_dir;
- list_add_tail(&dev->list_all, &list_all_bus_instances);
- dev->state.created = 1;
- dev->vbus_hdr_info = (void *)hdr_info;
- write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
- write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
- visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
- return 0;
-
-err_debugfs_dir:
- debugfs_remove_recursive(dev->debugfs_dir);
- kfree(hdr_info);
- dev_err(&dev->device, "%s failed: %d\n", __func__, err);
- return err;
-}
-
-/*
- * visorbus_remove_instance() - remove a device instance for the visorbus itself
- * @dev: struct visor_device identifying the bus to remove
- */
-void visorbus_remove_instance(struct visor_device *dev)
-{
- /*
- * Note that this will result in the release method for
- * dev->device being called, i.e. visorbus_release_busdevice().
- * device_unregister() ends with a put_device(); once the last
- * reference to the device is dropped, the driver core invokes
- * the release() callback.
- */
- kfree(dev->vbus_hdr_info);
- list_del(&dev->list_all);
- if (dev->pending_msg_hdr)
- visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
- device_unregister(&dev->device);
-}
-
-/*
- * remove_all_visor_devices() - remove all child visorbus device instances
- */
-static void remove_all_visor_devices(void)
-{
- struct list_head *listentry, *listtmp;
-
- list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
- struct visor_device *dev;
-
- dev = list_entry(listentry, struct visor_device, list_all);
- remove_visor_device(dev);
- }
-}
-
-/*
- * pause_state_change_complete() - the callback function to be called by a
- * visorbus function driver when a
- * pending "pause device" operation has
- * completed
- * @dev: struct visor_device identifying the paused device
- * @status: 0 iff the pause state change completed successfully, otherwise
- * a negative errno value indicating the reason for failure
- */
-static void pause_state_change_complete(struct visor_device *dev, int status)
-{
- if (!dev->pausing)
- return;
-
- dev->pausing = false;
- visorbus_device_changestate_response(dev, status,
- segment_state_standby);
-}
-
-/*
- * resume_state_change_complete() - the callback function to be called by a
- * visorbus function driver when a
- * pending "resume device" operation has
- * completed
- * @dev: struct visor_device identifying the resumed device
- * @status: 0 iff the resume state change completed successfully, otherwise
- * a negative errno value indicating the reason for failure
- */
-static void resume_state_change_complete(struct visor_device *dev, int status)
-{
- if (!dev->resuming)
- return;
-
- dev->resuming = false;
- /*
- * Notify the chipset driver that the resume is complete,
- * which will presumably want to send some sort of response to
- * the initiator.
- */
- visorbus_device_changestate_response(dev, status,
- segment_state_running);
-}
-
-/*
- * visorchipset_initiate_device_pause_resume() - start a pause or resume
- * operation for a visor device
- * @dev: struct visor_device identifying the device being paused or resumed
- * @is_pause: true to indicate pause operation, false to indicate resume
- *
- * Tell the subordinate function driver for a specific device to pause
- * or resume that device. Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete() and
- * resume_state_change_complete().
- */
-static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
- bool is_pause)
-{
- int err;
- struct visor_driver *drv;
-
- /* If no driver is associated with the device, there is nothing to pause/resume */
- if (!dev->device.driver)
- return 0;
- if (dev->pausing || dev->resuming)
- return -EBUSY;
-
- drv = to_visor_driver(dev->device.driver);
- if (is_pause) {
- dev->pausing = true;
- err = drv->pause(dev, pause_state_change_complete);
- } else {
- /*
- * The vbus_dev_info structure in the channel has been cleared,
- * so republish it before resuming.
- */
- publish_vbus_dev_info(dev);
- dev->resuming = true;
- err = drv->resume(dev, resume_state_change_complete);
- }
- return err;
-}
-
-/*
- * visorchipset_device_pause() - start a pause operation for a visor device
- * @dev_info: struct visor_device identifying the device being paused
- *
- * Tell the subordinate function driver for a specific device to pause
- * that device. Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete().
- */
-int visorchipset_device_pause(struct visor_device *dev_info)
-{
- int err;
-
- err = visorchipset_initiate_device_pause_resume(dev_info, true);
- if (err < 0) {
- dev_info->pausing = false;
- return err;
- }
- return 0;
-}
-
-/*
- * visorchipset_device_resume() - start a resume operation for a visor device
- * @dev_info: struct visor_device identifying the device being resumed
- *
- * Tell the subordinate function driver for a specific device to resume
- * that device. Success/failure result is returned asynchronously
- * via a callback function; see resume_state_change_complete().
- */
-int visorchipset_device_resume(struct visor_device *dev_info)
-{
- int err;
-
- err = visorchipset_initiate_device_pause_resume(dev_info, false);
- if (err < 0) {
- dev_info->resuming = false;
- return err;
- }
- return 0;
-}
-
-int visorbus_init(void)
-{
- int err;
-
- visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
- bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
- err = bus_register(&visorbus_type);
- if (err < 0)
- return err;
- initialized = true;
- bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
- return 0;
-}
-
-void visorbus_exit(void)
-{
- struct list_head *listentry, *listtmp;
-
- remove_all_visor_devices();
- list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
- struct visor_device *dev;
-
- dev = list_entry(listentry, struct visor_device, list_all);
- visorbus_remove_instance(dev);
- }
- bus_unregister(&visorbus_type);
- initialized = false;
- debugfs_remove_recursive(visorbus_debugfs_dir);
-}
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
deleted file mode 100644
index 6956de605827..000000000000
--- a/drivers/visorbus/visorbus_private.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VISORBUS_PRIVATE_H__
-#define __VISORBUS_PRIVATE_H__
-
-#include <linux/uuid.h>
-#include <linux/utsname.h>
-#include <linux/visorbus.h>
-
-#include "controlvmchannel.h"
-#include "vbuschannel.h"
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from);
-int visorbus_create_instance(struct visor_device *dev);
-void visorbus_remove_instance(struct visor_device *bus_info);
-int create_visor_device(struct visor_device *dev_info);
-void remove_visor_device(struct visor_device *dev_info);
-int visorchipset_device_pause(struct visor_device *dev_info);
-int visorchipset_device_resume(struct visor_device *dev_info);
-void visorbus_response(struct visor_device *p, int response, int controlvm_id);
-void visorbus_device_changestate_response(struct visor_device *p, int response,
- struct visor_segment_state state);
-int visorbus_init(void);
-void visorbus_exit(void);
-
-/* visorchannel access functions */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid, bool needs_lock);
-void visorchannel_destroy(struct visorchannel *channel);
-int visorchannel_read(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes);
-int visorchannel_write(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes);
-u64 visorchannel_get_physaddr(struct visorchannel *channel);
-ulong visorchannel_get_nbytes(struct visorchannel *channel);
-char *visorchannel_id(struct visorchannel *channel, char *s);
-char *visorchannel_zoneid(struct visorchannel *channel, char *s);
-u64 visorchannel_get_clientpartition(struct visorchannel *channel);
-int visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle);
-char *visorchannel_guid_id(const guid_t *guid, char *s);
-void *visorchannel_get_header(struct visorchannel *channel);
-#endif
diff --git a/drivers/visorbus/visorchannel.c b/drivers/visorbus/visorchannel.c
deleted file mode 100644
index bd890e0f456b..000000000000
--- a/drivers/visorbus/visorchannel.c
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This provides s-Par channel communication primitives, which are
- * independent of the mechanism used to access the channel data.
- */
-
-#include <linux/uuid.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-#include "controlvmchannel.h"
-
-#define VISOR_DRV_NAME "visorchannel"
-
-#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
- GUID_INIT(0x3cd6e705, 0xd6a2, 0x4aa5, \
- 0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
-
-static const guid_t visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;
-
-struct visorchannel {
- u64 physaddr;
- ulong nbytes;
- void *mapped;
- bool requested;
- struct channel_header chan_hdr;
- guid_t guid;
- /*
- * channel creator knows if more than one thread will be inserting or
- * removing
- */
- bool needs_lock;
- /* protect head writes in chan_hdr */
- spinlock_t insert_lock;
- /* protect tail writes in chan_hdr */
- spinlock_t remove_lock;
- guid_t type;
- guid_t inst;
-};
-
-void visorchannel_destroy(struct visorchannel *channel)
-{
- if (!channel)
- return;
-
- if (channel->mapped) {
- memunmap(channel->mapped);
- if (channel->requested)
- release_mem_region(channel->physaddr, channel->nbytes);
- }
- kfree(channel);
-}
-
-u64 visorchannel_get_physaddr(struct visorchannel *channel)
-{
- return channel->physaddr;
-}
-
-ulong visorchannel_get_nbytes(struct visorchannel *channel)
-{
- return channel->nbytes;
-}
-
-char *visorchannel_guid_id(const guid_t *guid, char *s)
-{
- sprintf(s, "%pUL", guid);
- return s;
-}
-
-char *visorchannel_id(struct visorchannel *channel, char *s)
-{
- return visorchannel_guid_id(&channel->guid, s);
-}
-
-char *visorchannel_zoneid(struct visorchannel *channel, char *s)
-{
- return visorchannel_guid_id(&channel->chan_hdr.zone_guid, s);
-}
-
-u64 visorchannel_get_clientpartition(struct visorchannel *channel)
-{
- return channel->chan_hdr.partition_handle;
-}
-
-int visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle)
-{
- channel->chan_hdr.partition_handle = partition_handle;
- return 0;
-}
-
-/**
- * visorchannel_get_guid() - queries the GUID of the designated channel
- * @channel: the channel to query
- *
- * Return: the GUID of the provided channel
- */
-const guid_t *visorchannel_get_guid(struct visorchannel *channel)
-{
- return &channel->guid;
-}
-EXPORT_SYMBOL_GPL(visorchannel_get_guid);
-
-int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
- ulong nbytes)
-{
- if (offset + nbytes > channel->nbytes)
- return -EIO;
-
- memcpy(dest, channel->mapped + offset, nbytes);
- return 0;
-}
-
-int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
- ulong nbytes)
-{
- size_t chdr_size = sizeof(struct channel_header);
- size_t copy_size;
-
- if (offset + nbytes > channel->nbytes)
- return -EIO;
-
- if (offset < chdr_size) {
- copy_size = min(chdr_size - offset, nbytes);
- memcpy(((char *)(&channel->chan_hdr)) + offset,
- dest, copy_size);
- }
- memcpy(channel->mapped + offset, dest, nbytes);
- return 0;
-}
-
-void *visorchannel_get_header(struct visorchannel *channel)
-{
- return &channel->chan_hdr;
-}
-
-/*
- * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
- * channel header
- */
-static int sig_queue_offset(struct channel_header *chan_hdr, int q)
-{
- return ((chan_hdr)->ch_space_offset +
- ((q) * sizeof(struct signal_queue_header)));
-}
-
-/*
- * Return offset of a specific queue entry (data) from the beginning of a
- * channel header
- */
-static int sig_data_offset(struct channel_header *chan_hdr, int q,
- struct signal_queue_header *sig_hdr, int slot)
-{
- return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
- (slot * sig_hdr->signal_size));
-}
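The two helpers above imply the following channel layout; this is an informational sketch reconstructed from the offset math, not an authoritative spec.

/*
 *   offset 0                                   struct channel_header
 *   ch_space_offset + q * sizeof(queue hdr)    signal_queue_header for queue q
 *   sig_queue_offset(q) + sig_base_offset
 *                       + slot * signal_size   data for slot in queue q
 */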
-
-/*
- * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
- * host memory
- */
-#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
- visorchannel_write(channel, \
- sig_queue_offset(&channel->chan_hdr, queue) + \
- offsetof(struct signal_queue_header, FIELD), \
- &((sig_hdr)->FIELD), \
- sizeof((sig_hdr)->FIELD))
-
-static int sig_read_header(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr)
-{
- if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
- return -EINVAL;
-
- /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
- return visorchannel_read(channel,
- sig_queue_offset(&channel->chan_hdr, queue),
- sig_hdr, sizeof(struct signal_queue_header));
-}
-
-static int sig_read_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot,
- void *data)
-{
- int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
- sig_hdr, slot);
-
- return visorchannel_read(channel, signal_data_offset,
- data, sig_hdr->signal_size);
-}
-
-static int sig_write_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot,
- void *data)
-{
- int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
- sig_hdr, slot);
-
- return visorchannel_write(channel, signal_data_offset,
- data, sig_hdr->signal_size);
-}
-
-static int signalremove_inner(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- struct signal_queue_header sig_hdr;
- int error;
-
- error = sig_read_header(channel, queue, &sig_hdr);
- if (error)
- return error;
- /* No signals to remove; have caller try again. */
- if (sig_hdr.head == sig_hdr.tail)
- return -EAGAIN;
- sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
- error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
- if (error)
- return error;
- sig_hdr.num_received++;
- /*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
- * host memory. Required for channel sync.
- */
- mb();
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
- if (error)
- return error;
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
- if (error)
- return error;
- return 0;
-}
-
-/**
- * visorchannel_signalremove() - removes a message from the designated
- * channel/queue
- * @channel: the channel the message will be removed from
- * @queue: the queue the message will be removed from
- * @msg: the message to remove
- *
- * Return: integer error code indicating the status of the removal
- */
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- int rc;
- unsigned long flags;
-
- if (channel->needs_lock) {
- spin_lock_irqsave(&channel->remove_lock, flags);
- rc = signalremove_inner(channel, queue, msg);
- spin_unlock_irqrestore(&channel->remove_lock, flags);
- } else {
- rc = signalremove_inner(channel, queue, msg);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-
-static bool queue_empty(struct visorchannel *channel, u32 queue)
-{
- struct signal_queue_header sig_hdr;
-
- if (sig_read_header(channel, queue, &sig_hdr))
- return true;
- return (sig_hdr.head == sig_hdr.tail);
-}
-
-/**
- * visorchannel_signalempty() - checks if the designated channel/queue contains
- * any messages
- * @channel: the channel to query
- * @queue: the queue in the channel to query
- *
- * Return: boolean indicating whether any messages in the designated
- * channel/queue are present
- */
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
-{
- bool rc;
- unsigned long flags;
-
- if (!channel->needs_lock)
- return queue_empty(channel, queue);
- spin_lock_irqsave(&channel->remove_lock, flags);
- rc = queue_empty(channel, queue);
- spin_unlock_irqrestore(&channel->remove_lock, flags);
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalempty);
-
-static int signalinsert_inner(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- struct signal_queue_header sig_hdr;
- int err;
-
- err = sig_read_header(channel, queue, &sig_hdr);
- if (err)
- return err;
- sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
- if (sig_hdr.head == sig_hdr.tail) {
- sig_hdr.num_overflows++;
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
- if (err)
- return err;
- return -EIO;
- }
- err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
- if (err)
- return err;
- sig_hdr.num_sent++;
- /*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
- * host memory. Required for channel sync.
- */
- mb();
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
- if (err)
- return err;
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
- if (err)
- return err;
- return 0;
-}
-
-/*
- * visorchannel_create() - creates the struct visorchannel abstraction for a
- * data area in memory, but does NOT modify this data
- * area
- * @physaddr: physical address of start of channel
- * @gfp: gfp_t to use when allocating memory for the data struct
- * @guid: GUID that identifies the channel type
- * @needs_lock: must specify true if you have multiple threads of execution
- * that will be calling visorchannel methods of this
- * visorchannel at the same time
- *
- * Return: pointer to visorchannel that was created if successful,
- * otherwise NULL
- */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid, bool needs_lock)
-{
- struct visorchannel *channel;
- int err;
- size_t size = sizeof(struct channel_header);
-
- if (physaddr == 0)
- return NULL;
-
- channel = kzalloc(sizeof(*channel), gfp);
- if (!channel)
- return NULL;
- channel->needs_lock = needs_lock;
- spin_lock_init(&channel->insert_lock);
- spin_lock_init(&channel->remove_lock);
- /*
- * The video driver contains the efi framebuffer, so it will get a
- * resource conflict when requesting its full mem region. Since we are
- * only using the efi framebuffer for video, we can ignore this. Remember
- * that we haven't requested the region, so we don't try to release it
- * later on.
- */
- channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
- if (!channel->requested && !guid_equal(guid, &visor_video_guid))
- /* we only care about errors if this is not the video channel */
- goto err_destroy_channel;
- channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(physaddr, size);
- goto err_destroy_channel;
- }
- channel->physaddr = physaddr;
- channel->nbytes = size;
- err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
- if (err)
- goto err_destroy_channel;
- size = (ulong)channel->chan_hdr.size;
- memunmap(channel->mapped);
- if (channel->requested)
- release_mem_region(channel->physaddr, channel->nbytes);
- channel->mapped = NULL;
- channel->requested = request_mem_region(channel->physaddr, size,
- VISOR_DRV_NAME);
- if (!channel->requested && !guid_equal(guid, &visor_video_guid))
- /* we only care about errors if this is not the video channel */
- goto err_destroy_channel;
- channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(channel->physaddr, size);
- goto err_destroy_channel;
- }
- channel->nbytes = size;
- guid_copy(&channel->guid, guid);
- return channel;
-
-err_destroy_channel:
- visorchannel_destroy(channel);
- return NULL;
-}
-
-/**
- * visorchannel_signalinsert() - inserts a message into the designated
- * channel/queue
- * @channel: the channel the message will be added to
- * @queue: the queue the message will be added to
- * @msg: the message to insert
- *
- * Return: integer error code indicating the status of the insertion
- */
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- int rc;
- unsigned long flags;
-
- if (channel->needs_lock) {
- spin_lock_irqsave(&channel->insert_lock, flags);
- rc = signalinsert_inner(channel, queue, msg);
- spin_unlock_irqrestore(&channel->insert_lock, flags);
- } else {
- rc = signalinsert_inner(channel, queue, msg);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
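Putting the insert/remove primitives together, a consumer would typically drain a queue until -EAGAIN (queue empty). A short sketch; struct my_msg, the queue index and handle_msg() are hypothetical names, not part of this patch.

static void my_drain_queue(struct visorchannel *chan, u32 queue)
{
	struct my_msg msg;

	/* signalremove_inner() returns -EAGAIN once head == tail */
	while (visorchannel_signalremove(chan, queue, &msg) == 0)
		handle_msg(&msg);
}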
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
deleted file mode 100644
index 5668cad86e37..000000000000
--- a/drivers/visorbus/visorchipset.c
+++ /dev/null
@@ -1,1691 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/acpi.h>
-#include <linux/crash_dump.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-
-/* {72120008-4AAB-11DC-8530-444553544200} */
-#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
- 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-
-static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
-static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
-static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
-
-#define POLLJIFFIES_CONTROLVM_FAST 1
-#define POLLJIFFIES_CONTROLVM_SLOW 100
-
-#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
-
-#define UNISYS_VISOR_LEAF_ID 0x40000000
-
-/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
-#define UNISYS_VISOR_ID_EBX 0x73696e55
-#define UNISYS_VISOR_ID_ECX 0x70537379
-#define UNISYS_VISOR_ID_EDX 0x34367261
-
-/*
- * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
- * to slow polling mode. As soon as we get a controlvm message, we switch back
- * to fast polling mode.
- */
-#define MIN_IDLE_SECONDS 10
-
-struct parser_context {
- unsigned long allocbytes;
- unsigned long param_bytes;
- u8 *curr;
- unsigned long bytes_remaining;
- bool byte_stream;
- struct visor_controlvm_parameters_header data;
-};
-
-/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
-#define VMCALL_CONTROLVM_ADDR 0x0501
-
-enum vmcall_result {
- VMCALL_RESULT_SUCCESS = 0,
- VMCALL_RESULT_INVALID_PARAM = 1,
- VMCALL_RESULT_DATA_UNAVAILABLE = 2,
- VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
- VMCALL_RESULT_DEVICE_ERROR = 4,
- VMCALL_RESULT_DEVICE_NOT_READY = 5
-};
-
-/*
- * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
- * parameters to VMCALL_CONTROLVM_ADDR
- * interface.
- * @address: The Guest-relative physical address of the ControlVm channel.
- * This VMCall fills this in with the appropriate address.
- * Contents provided by this VMCALL (OUT).
- * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
- * fills this in with the appropriate size. Contents provided by
- * this VMCALL (OUT).
- * @unused: Unused bytes to keep the struct 64-bit aligned.
- */
-struct vmcall_io_controlvm_addr_params {
- u64 address;
- u32 channel_bytes;
- u8 unused[4];
-} __packed;
-
-struct visorchipset_device {
- struct acpi_device *acpi_device;
- unsigned long poll_jiffies;
- /* when we got our last controlvm message */
- unsigned long most_recent_message_jiffies;
- struct delayed_work periodic_controlvm_work;
- struct visorchannel *controlvm_channel;
- unsigned long controlvm_payload_bytes_buffered;
- /*
- * The following variables are used to handle the scenario where we are
- * unable to offload the payload from a controlvm message due to memory
- * requirements. In this scenario, we simply stash the controlvm
- * message, then attempt to process it again the next time
- * controlvm_periodic_work() runs.
- */
- struct controlvm_message controlvm_pending_msg;
- bool controlvm_pending_msg_valid;
- struct vmcall_io_controlvm_addr_params controlvm_params;
-};
-
-static struct visorchipset_device *chipset_dev;
-
-struct parahotplug_request {
- struct list_head list;
- int id;
- unsigned long expiration;
- struct controlvm_message msg;
-};
-
-/* prototypes for attributes */
-static ssize_t toolaction_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 tool_action = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- tool_action),
- &tool_action, sizeof(u8));
- if (err)
- return err;
- return sprintf(buf, "%u\n", tool_action);
-}
-
-static ssize_t toolaction_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u8 tool_action;
- int err;
-
- if (kstrtou8(buf, 10, &tool_action))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- tool_action),
- &tool_action, sizeof(u8));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(toolaction);
-
-static ssize_t boottotool_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct efi_visor_indication efi_visor_indication;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- efi_visor_ind),
- &efi_visor_indication,
- sizeof(struct efi_visor_indication));
- if (err)
- return err;
- return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
-}
-
-static ssize_t boottotool_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int val, err;
- struct efi_visor_indication efi_visor_indication;
-
- if (kstrtoint(buf, 10, &val))
- return -EINVAL;
- efi_visor_indication.boot_to_tool = val;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- efi_visor_ind),
- &(efi_visor_indication),
- sizeof(struct efi_visor_indication));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(boottotool);
-
-static ssize_t error_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u32 error = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_error),
- &error, sizeof(u32));
- if (err)
- return err;
- return sprintf(buf, "%u\n", error);
-}
-
-static ssize_t error_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u32 error;
- int err;
-
- if (kstrtou32(buf, 10, &error))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_error),
- &error, sizeof(u32));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(error);
-
-static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u32 text_id = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_text_id),
- &text_id, sizeof(u32));
- if (err)
- return err;
- return sprintf(buf, "%u\n", text_id);
-}
-
-static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u32 text_id;
- int err;
-
- if (kstrtou32(buf, 10, &text_id))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_text_id),
- &text_id, sizeof(u32));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(textid);
-
-static ssize_t remaining_steps_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u16 remaining_steps = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
- if (err)
- return err;
- return sprintf(buf, "%hu\n", remaining_steps);
-}
-
-static ssize_t remaining_steps_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u16 remaining_steps;
- int err;
-
- if (kstrtou16(buf, 10, &remaining_steps))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(remaining_steps);
-
-static void controlvm_init_response(struct controlvm_message *msg,
- struct controlvm_message_header *msg_hdr,
- int response)
-{
- memset(msg, 0, sizeof(struct controlvm_message));
- memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
- msg->hdr.payload_bytes = 0;
- msg->hdr.payload_vm_offset = 0;
- msg->hdr.payload_max_bytes = 0;
- if (response < 0) {
- msg->hdr.flags.failed = 1;
- msg->hdr.completion_status = (u32)(-response);
- }
-}
-
-static int controlvm_respond_chipset_init(
- struct controlvm_message_header *msg_hdr,
- int response,
- enum visor_chipset_feature features)
-{
- struct controlvm_message outmsg;
-
- controlvm_init_response(&outmsg, msg_hdr, response);
- outmsg.cmd.init_chipset.features = features;
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int chipset_init(struct controlvm_message *inmsg)
-{
- static int chipset_inited;
- enum visor_chipset_feature features = 0;
- int rc = CONTROLVM_RESP_SUCCESS;
- int res = 0;
-
- if (chipset_inited) {
- rc = -CONTROLVM_RESP_ALREADY_DONE;
- res = -EIO;
- goto out_respond;
- }
- chipset_inited = 1;
- /*
- * Set features to indicate we support parahotplug (if Command also
- * supports it). Set the "reply" bit so Command knows this is a
- * features-aware driver.
- */
- features = inmsg->cmd.init_chipset.features &
- VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
- features |= VISOR_CHIPSET_FEATURE_REPLY;
-
-out_respond:
- if (inmsg->hdr.flags.response_expected)
- res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
-
- return res;
-}
-
-static int controlvm_respond(struct controlvm_message_header *msg_hdr,
- int response, struct visor_segment_state *state)
-{
- struct controlvm_message outmsg;
-
- controlvm_init_response(&outmsg, msg_hdr, response);
- if (outmsg.hdr.flags.test_message == 1)
- return -EINVAL;
- if (state) {
- outmsg.cmd.device_change_state.state = *state;
- outmsg.cmd.device_change_state.flags.phys_device = 1;
- }
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-enum crash_obj_type {
- CRASH_DEV,
- CRASH_BUS,
-};
-
-static int save_crash_message(struct controlvm_message *msg,
- enum crash_obj_type cr_type)
-{
- u32 local_crash_msg_offset;
- u16 local_crash_msg_count;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read message count\n");
- return err;
- }
- if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev,
- "invalid number of messages\n");
- return -EIO;
- }
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read offset\n");
- return err;
- }
- switch (cr_type) {
- case CRASH_DEV:
- local_crash_msg_offset += sizeof(struct controlvm_message);
- err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset, msg,
- sizeof(struct controlvm_message));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to write dev msg\n");
- return err;
- }
- break;
- case CRASH_BUS:
- err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset, msg,
- sizeof(struct controlvm_message));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to write bus msg\n");
- return err;
- }
- break;
- default:
- dev_err(&chipset_dev->acpi_device->dev,
- "Invalid crash_obj_type\n");
- break;
- }
- return 0;
-}
-
-static int controlvm_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
-{
- if (pending_msg_hdr->id != (u32)cmd_id)
- return -EINVAL;
-
- return controlvm_respond(pending_msg_hdr, response, NULL);
-}
-
-static int device_changestate_responder(enum controlvm_id cmd_id,
- struct visor_device *p, int response,
- struct visor_segment_state state)
-{
- struct controlvm_message outmsg;
-
- if (p->pending_msg_hdr->id != cmd_id)
- return -EINVAL;
-
- controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
- outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
- outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
- outmsg.cmd.device_change_state.state = state;
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int visorbus_create(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->create_bus.bus_no;
- struct visor_device *bus_info;
- struct visorchannel *visorchannel;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (bus_info && bus_info->state.created == 1) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed %s: already exists\n", __func__);
- err = -EEXIST;
- goto err_respond;
- }
- bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
- if (!bus_info) {
- err = -ENOMEM;
- goto err_respond;
- }
- INIT_LIST_HEAD(&bus_info->list_all);
- bus_info->chipset_bus_no = bus_no;
- bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
- err = save_crash_message(inmsg, CRASH_BUS);
- if (err)
- goto err_free_bus_info;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_free_bus_info;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
- visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
- GFP_KERNEL,
- &cmd->create_bus.bus_data_type_guid,
- false);
- if (!visorchannel) {
- err = -ENOMEM;
- goto err_free_pending_msg;
- }
- bus_info->visorchannel = visorchannel;
- /* Response will be handled by visorbus_create_instance on success */
- err = visorbus_create_instance(bus_info);
- if (err)
- goto err_destroy_channel;
- return 0;
-
-err_destroy_channel:
- visorchannel_destroy(visorchannel);
-
-err_free_pending_msg:
- kfree(bus_info->pending_msg_hdr);
-
-err_free_bus_info:
- kfree(bus_info);
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_destroy(struct controlvm_message *inmsg)
-{
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
- struct visor_device *bus_info;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- err = -ENOENT;
- goto err_respond;
- }
- if (bus_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EEXIST;
- goto err_respond;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
- /* Response will be handled by visorbus_remove_instance */
- visorbus_remove_instance(bus_info);
- return 0;
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static const guid_t *parser_id_get(struct parser_context *ctx)
-{
- return &ctx->data.id;
-}
-
-static void *parser_string_get(u8 *pscan, int nscan)
-{
- int value_length;
- void *value;
-
- if (nscan == 0)
- return NULL;
-
- value_length = strnlen(pscan, nscan);
- value = kzalloc(value_length + 1, GFP_KERNEL);
- if (!value)
- return NULL;
- if (value_length > 0)
- memcpy(value, pscan, value_length);
- return value;
-}
-
-static void *parser_name_get(struct parser_context *ctx)
-{
- struct visor_controlvm_parameters_header *phdr;
-
- phdr = &ctx->data;
- if ((unsigned long)phdr->name_offset +
- (unsigned long)phdr->name_length > ctx->param_bytes)
- return NULL;
- ctx->curr = (char *)phdr + phdr->name_offset;
- ctx->bytes_remaining = phdr->name_length;
- return parser_string_get(ctx->curr, phdr->name_length);
-}
-
-static int visorbus_configure(struct controlvm_message *inmsg,
- struct parser_context *parser_ctx)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- u32 bus_no;
- struct visor_device *bus_info;
- int err = 0;
-
- bus_no = cmd->configure_bus.bus_no;
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- err = -EINVAL;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (bus_info->pending_msg_hdr) {
- err = -EIO;
- goto err_respond;
- }
- err = visorchannel_set_clientpartition(bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
- if (err)
- goto err_respond;
- if (parser_ctx) {
- const guid_t *partition_guid = parser_id_get(parser_ctx);
-
- guid_copy(&bus_info->partition_guid, partition_guid);
- bus_info->name = parser_name_get(parser_ctx);
- }
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return 0;
-
-err_respond:
- dev_err(&chipset_dev->acpi_device->dev,
- "%s exited with err: %d\n", __func__, err);
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_create(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->create_device.bus_no;
- u32 dev_no = cmd->create_device.dev_no;
- struct visor_device *dev_info;
- struct visor_device *bus_info;
- struct visorchannel *visorchannel;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to get bus by id: %d\n", bus_no);
- err = -ENODEV;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "bus not created, id: %d\n", bus_no);
- err = -EINVAL;
- goto err_respond;
- }
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (dev_info && dev_info->state.created == 1) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to get bus by id: %d/%d\n", bus_no, dev_no);
- err = -EEXIST;
- goto err_respond;
- }
-
- dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info) {
- err = -ENOMEM;
- goto err_respond;
- }
- dev_info->chipset_bus_no = bus_no;
- dev_info->chipset_dev_no = dev_no;
- guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
- dev_info->device.parent = &bus_info->device;
- visorchannel = visorchannel_create(cmd->create_device.channel_addr,
- GFP_KERNEL,
- &cmd->create_device.data_type_guid,
- true);
- if (!visorchannel) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to create visorchannel: %d/%d\n",
- bus_no, dev_no);
- err = -ENOMEM;
- goto err_free_dev_info;
- }
- dev_info->visorchannel = visorchannel;
- guid_copy(&dev_info->channel_type_guid,
- &cmd->create_device.data_type_guid);
- if (guid_equal(&cmd->create_device.data_type_guid,
- &visor_vhba_channel_guid)) {
- err = save_crash_message(inmsg, CRASH_DEV);
- if (err)
- goto err_destroy_visorchannel;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_destroy_visorchannel;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- /* create_visor_device will send response */
- err = create_visor_device(dev_info);
- if (err)
- goto err_destroy_visorchannel;
-
- return 0;
-
-err_destroy_visorchannel:
- visorchannel_destroy(visorchannel);
-
-err_free_dev_info:
- kfree(dev_info);
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_changestate(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->device_change_state.bus_no;
- u32 dev_no = cmd->device_change_state.dev_no;
- struct visor_segment_state state = cmd->device_change_state.state;
- struct visor_device *dev_info;
- int err = 0;
-
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (dev_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EIO;
- goto err_respond;
- }
-
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- if (state.alive == segment_state_running.alive &&
- state.operating == segment_state_running.operating)
- /* Response will be sent from visorchipset_device_resume */
- err = visorchipset_device_resume(dev_info);
- /* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.alive == segment_state_standby.alive &&
- state.operating == segment_state_standby.operating)
- /*
- * technically this is the standby case, where the server is lost.
- * Response will be sent from visorchipset_device_pause.
- */
- err = visorchipset_device_pause(dev_info);
- if (err)
- goto err_respond;
- return 0;
-
-err_respond:
- dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_destroy(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->destroy_device.bus_no;
- u32 dev_no = cmd->destroy_device.dev_no;
- struct visor_device *dev_info;
- int err;
-
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (dev_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EIO;
- goto err_respond;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
-
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- kfree(dev_info->name);
- remove_visor_device(dev_info);
- return 0;
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-/*
- * The general parahotplug flow works as follows. The visorchipset receives
- * a DEVICE_CHANGESTATE message from Command specifying a physical device
- * to enable or disable. The CONTROLVM message handler calls
- * parahotplug_process_message, which then adds the message to a global list
- * and kicks off a udev event which causes a user level script to enable or
- * disable the specified device. The udev script then writes to
- * /sys/devices/platform/visorchipset/parahotplug, which causes the
- * parahotplug store functions to get called, at which point the
- * appropriate CONTROLVM message is retrieved from the list and responded to.
- */
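As a concrete illustration of the completion step described in the comment above, a user-space helper along these lines could acknowledge a disable request once the device is really gone (illustrative sketch only; the sysfs path comes from the comment above and the attribute name from devicedisabled_store() further below):

	#include <stdio.h>

	/* Echo the request ID back through the parahotplug interface; this
	 * lands in devicedisabled_store() and completes the pending
	 * CONTROLVM message. */
	static int ack_parahotplug_disable(int id)
	{
		FILE *f = fopen("/sys/devices/platform/visorchipset/parahotplug/devicedisabled", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", id);
		return fclose(f);
	}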
-
-#define PARAHOTPLUG_TIMEOUT_MS 2000
-
-/*
- * parahotplug_next_id() - generate unique int to match an outstanding
- * CONTROLVM message with a udev script /sys
- * response
- *
- * Return: a unique integer value
- */
-static int parahotplug_next_id(void)
-{
- static atomic_t id = ATOMIC_INIT(0);
-
- return atomic_inc_return(&id);
-}
-
-/*
- * parahotplug_next_expiration() - returns the time (in jiffies) when a
- * CONTROLVM message on the list should expire
- * -- PARAHOTPLUG_TIMEOUT_MS in the future
- *
- * Return: expected expiration time (in jiffies)
- */
-static unsigned long parahotplug_next_expiration(void)
-{
- return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
-}
-
-/*
- * parahotplug_request_create() - create a parahotplug_request, which is
- * basically a wrapper for a CONTROLVM_MESSAGE
- * that we can stick on a list
- * @msg: the message to insert in the request
- *
- * Return: the request containing the provided message
- */
-static struct parahotplug_request *parahotplug_request_create(
- struct controlvm_message *msg)
-{
- struct parahotplug_request *req;
-
- req = kmalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return NULL;
- req->id = parahotplug_next_id();
- req->expiration = parahotplug_next_expiration();
- req->msg = *msg;
- return req;
-}
-
-/*
- * parahotplug_request_destroy() - free a parahotplug_request
- * @req: the request to deallocate
- */
-static void parahotplug_request_destroy(struct parahotplug_request *req)
-{
- kfree(req);
-}
-
-static LIST_HEAD(parahotplug_request_list);
-/* lock for above */
-static DEFINE_SPINLOCK(parahotplug_request_list_lock);
-
-/*
- * parahotplug_request_complete() - mark request as complete
- * @id: the id of the request
- * @active: indicates whether the request is assigned to active partition
- *
- * Called from the /sys handler, which means the user script has
- * finished the enable/disable. Find the matching identifier, and
- * respond to the CONTROLVM message with success.
- *
- * Return: 0 on success or -EINVAL on failure
- */
-static int parahotplug_request_complete(int id, u16 active)
-{
- struct list_head *pos;
- struct list_head *tmp;
- struct parahotplug_request *req;
-
- spin_lock(&parahotplug_request_list_lock);
- /* Look for a request matching "id". */
- list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- req = list_entry(pos, struct parahotplug_request, list);
- if (req->id == id) {
- /*
- * Found a match. Remove it from the list and
- * respond.
- */
- list_del(pos);
- spin_unlock(&parahotplug_request_list_lock);
- req->msg.cmd.device_change_state.state.active = active;
- if (req->msg.hdr.flags.response_expected)
- controlvm_respond(
- &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- &req->msg.cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- return 0;
- }
- }
- spin_unlock(&parahotplug_request_list_lock);
- return -EINVAL;
-}
-
-/*
- * devicedisabled_store() - disables the hotplug device
- * @dev: sysfs interface variable not utilized in this function
- * @attr: sysfs interface variable not utilized in this function
- * @buf: buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/devicedisabled interface gets called by our support script
- * when an SR-IOV device has been shut down. The ID is passed to the script
- * and then passed back when the device has been removed.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t devicedisabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
- int err;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
- err = parahotplug_request_complete(id, 0);
- if (err < 0)
- return err;
- return count;
-}
-static DEVICE_ATTR_WO(devicedisabled);
-
-/*
- * deviceenabled_store() - enables the hotplug device
- * @dev: sysfs interface variable not utilized in this function
- * @attr: sysfs interface variable not utilized in this function
- * @buf: buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/deviceenabled interface gets called by our support script
- * when an SR-IOV device has been recovered. The ID is passed to the script
- * and then passed back when the device has been brought back up.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t deviceenabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
- parahotplug_request_complete(id, 1);
- return count;
-}
-static DEVICE_ATTR_WO(deviceenabled);
-
-static struct attribute *visorchipset_install_attrs[] = {
- &dev_attr_toolaction.attr,
- &dev_attr_boottotool.attr,
- &dev_attr_error.attr,
- &dev_attr_textid.attr,
- &dev_attr_remaining_steps.attr,
- NULL
-};
-
-static const struct attribute_group visorchipset_install_group = {
- .name = "install",
- .attrs = visorchipset_install_attrs
-};
-
-static struct attribute *visorchipset_parahotplug_attrs[] = {
- &dev_attr_devicedisabled.attr,
- &dev_attr_deviceenabled.attr,
- NULL
-};
-
-static const struct attribute_group visorchipset_parahotplug_group = {
- .name = "parahotplug",
- .attrs = visorchipset_parahotplug_attrs
-};
-
-static const struct attribute_group *visorchipset_dev_groups[] = {
- &visorchipset_install_group,
- &visorchipset_parahotplug_group,
- NULL
-};
-
-/*
- * parahotplug_request_kickoff() - initiate parahotplug request
- * @req: the request to initiate
- *
- * Cause uevent to run the user level script to do the disable/enable specified
- * in the parahotplug_request.
- */
-static int parahotplug_request_kickoff(struct parahotplug_request *req)
-{
- struct controlvm_message_packet *cmd = &req->msg.cmd;
- char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
- env_func[40];
- char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
- env_func, NULL
- };
-
- sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
- sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
- sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
- cmd->device_change_state.state.active);
- sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
- cmd->device_change_state.bus_no);
- sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
- cmd->device_change_state.dev_no >> 3);
- sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
- cmd->device_change_state.dev_no & 0x7);
- return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_CHANGE, envp);
-}
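The DEVICE/FUNCTION split above appears to follow the usual PCI devfn packing (device number in the upper bits, function number in the low three). A tiny illustration, not part of the driver:

	/* Illustrative only: dev_no packed like a PCI devfn, so
	 * dev_no = 0x23 is reported as DEVICE=4, FUNCTION=3. */
	static inline void example_decode_dev_no(u32 dev_no, u32 *device, u32 *function)
	{
		*device = dev_no >> 3;		/* VISOR_PARAHOTPLUG_DEVICE */
		*function = dev_no & 0x7;	/* VISOR_PARAHOTPLUG_FUNCTION */
	}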
-
-/*
- * parahotplug_process_message() - enables or disables a PCI device by kicking
- * off a udev script
- * @inmsg: the message indicating whether to enable or disable
- */
-static int parahotplug_process_message(struct controlvm_message *inmsg)
-{
- struct parahotplug_request *req;
- int err;
-
- req = parahotplug_request_create(inmsg);
- if (!req)
- return -ENOMEM;
- /*
- * For enable messages, just respond with success right away; we don't
- * need to wait to see whether the enable was successful.
- */
- if (inmsg->cmd.device_change_state.state.active) {
- err = parahotplug_request_kickoff(req);
- if (err)
- goto err_respond;
- controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
- &inmsg->cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- return 0;
- }
- /*
- * For disable messages, add the request to the request list before
- * kicking off the udev script. It won't get responded to until the
- * script has indicated it's done.
- */
- spin_lock(&parahotplug_request_list_lock);
- list_add_tail(&req->list, &parahotplug_request_list);
- spin_unlock(&parahotplug_request_list_lock);
- err = parahotplug_request_kickoff(req);
- if (err)
- goto err_respond;
- return 0;
-
-err_respond:
- controlvm_respond(&inmsg->hdr, err,
- &inmsg->cmd.device_change_state.state);
- return err;
-}
-
-/*
- * chipset_ready_uevent() - sends chipset_ready action
- *
- * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
-{
- int res;
-
- res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-/*
- * chipset_selftest_uevent() - sends chipset_selftest action
- *
- * Send a KOBJ_CHANGE uevent with SPARSP_SELFTEST=1 for
- * DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
-{
- char env_selftest[20];
- char *envp[] = { env_selftest, NULL };
- int res;
-
- sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
- res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_CHANGE, envp);
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-/*
- * chipset_notready_uevent() - sends chipset_notready action
- *
- * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
-{
- int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_OFFLINE);
-
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-static int unisys_vmcall(unsigned long tuple, unsigned long param)
-{
- int result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
- unsigned long reg_ebx;
- unsigned long reg_ecx;
-
- reg_ebx = param & 0xFFFFFFFF;
- reg_ecx = param >> 32;
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
- if (result)
- goto error;
- return 0;
-
-/* Need to convert from VMCALL error codes to Linux */
-error:
- switch (result) {
- case VMCALL_RESULT_INVALID_PARAM:
- return -EINVAL;
- case VMCALL_RESULT_DATA_UNAVAILABLE:
- return -ENODEV;
- default:
- return -EFAULT;
- }
-}
-
-static int controlvm_channel_create(struct visorchipset_device *dev)
-{
- struct visorchannel *chan;
- u64 addr;
- int err;
-
- err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
- virt_to_phys(&dev->controlvm_params));
- if (err)
- return err;
- addr = dev->controlvm_params.address;
- chan = visorchannel_create(addr, GFP_KERNEL,
- &visor_controlvm_channel_guid, true);
- if (!chan)
- return -ENOMEM;
- dev->controlvm_channel = chan;
- return 0;
-}
-
-static void setup_crash_devices_work_queue(struct work_struct *work)
-{
- struct controlvm_message local_crash_bus_msg;
- struct controlvm_message local_crash_dev_msg;
- struct controlvm_message msg = {
- .hdr.id = CONTROLVM_CHIPSET_INIT,
- .cmd.init_chipset = {
- .bus_count = 23,
- .switch_count = 0,
- },
- };
- u32 local_crash_msg_offset;
- u16 local_crash_msg_count;
-
- /* send init chipset msg */
- chipset_init(&msg);
- /* get saved message count */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
- return;
- }
- /* get saved crash message offset */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* read create device message for storage bus offset */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- &local_crash_bus_msg,
- sizeof(struct controlvm_message)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* read create device message for storage device */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- local_crash_msg_offset +
- sizeof(struct controlvm_message),
- &local_crash_dev_msg,
- sizeof(struct controlvm_message)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* reuse IOVM create bus message */
- if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
- dev_err(&chipset_dev->acpi_device->dev,
- "no valid create_bus message\n");
- return;
- }
- visorbus_create(&local_crash_bus_msg);
- /* reuse create device message for storage device */
- if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
- dev_err(&chipset_dev->acpi_device->dev,
- "no valid create_device message\n");
- return;
- }
- visorbus_device_create(&local_crash_dev_msg);
-}
-
-void visorbus_response(struct visor_device *bus_info, int response,
- int controlvm_id)
-{
- if (!bus_info->pending_msg_hdr)
- return;
-
- controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
- kfree(bus_info->pending_msg_hdr);
- bus_info->pending_msg_hdr = NULL;
-}
-
-void visorbus_device_changestate_response(struct visor_device *dev_info,
- int response,
- struct visor_segment_state state)
-{
- if (!dev_info->pending_msg_hdr)
- return;
-
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
- response, state);
- kfree(dev_info->pending_msg_hdr);
- dev_info->pending_msg_hdr = NULL;
-}
-
-static void parser_done(struct parser_context *ctx)
-{
- chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
- kfree(ctx);
-}
-
-static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
- bool *retry)
-{
- unsigned long allocbytes;
- struct parser_context *ctx;
- void *mapping;
-
- *retry = false;
- /* alloc an extra byte to ensure payload is \0 terminated */
- allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
- sizeof(struct visor_controlvm_parameters_header));
- if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
- MAX_CONTROLVM_PAYLOAD_BYTES) {
- *retry = true;
- return NULL;
- }
- ctx = kzalloc(allocbytes, GFP_KERNEL);
- if (!ctx) {
- *retry = true;
- return NULL;
- }
- ctx->allocbytes = allocbytes;
- ctx->param_bytes = bytes;
- mapping = memremap(addr, bytes, MEMREMAP_WB);
- if (!mapping)
- goto err_finish_ctx;
- memcpy(&ctx->data, mapping, bytes);
- memunmap(mapping);
- ctx->byte_stream = true;
- chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
- return ctx;
-
-err_finish_ctx:
- kfree(ctx);
- return NULL;
-}
-
-/*
- * handle_command() - process a controlvm message
- * @inmsg: the message to process
- * @channel_addr: address of the controlvm channel
- *
- * Return:
- * 0 - Successfully processed the message
- * -EAGAIN - ControlVM message was not processed and should be retried
- * before reading the next controlvm message; this can occur,
- * for example, when we need to throttle the allocation of the
- * memory into which controlvm payload data is copied.
- * < 0 - error: ControlVM message was processed but an error occurred.
- */
-static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
-{
- struct controlvm_message_packet *cmd = &inmsg.cmd;
- u64 parm_addr;
- u32 parm_bytes;
- struct parser_context *parser_ctx = NULL;
- struct controlvm_message ackmsg;
- int err = 0;
-
- /* create parsing context if necessary */
- parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
- parm_bytes = inmsg.hdr.payload_bytes;
- /*
- * Parameter and channel addresses within test messages actually lie
- * within our OS-controlled memory. We need to know that, because it
- * makes a difference in how we compute the virtual address.
- */
- if (parm_bytes) {
- bool retry;
-
- parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
- if (!parser_ctx && retry)
- return -EAGAIN;
- }
- controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
- err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_ACK, &ackmsg);
- if (err)
- return err;
- switch (inmsg.hdr.id) {
- case CONTROLVM_CHIPSET_INIT:
- err = chipset_init(&inmsg);
- break;
- case CONTROLVM_BUS_CREATE:
- err = visorbus_create(&inmsg);
- break;
- case CONTROLVM_BUS_DESTROY:
- err = visorbus_destroy(&inmsg);
- break;
- case CONTROLVM_BUS_CONFIGURE:
- err = visorbus_configure(&inmsg, parser_ctx);
- break;
- case CONTROLVM_DEVICE_CREATE:
- err = visorbus_device_create(&inmsg);
- break;
- case CONTROLVM_DEVICE_CHANGESTATE:
- if (cmd->device_change_state.flags.phys_device) {
- err = parahotplug_process_message(&inmsg);
- } else {
- /*
- * save the hdr and cmd structures for later use when
- * sending back the response to Command
- */
- err = visorbus_device_changestate(&inmsg);
- break;
- }
- break;
- case CONTROLVM_DEVICE_DESTROY:
- err = visorbus_device_destroy(&inmsg);
- break;
- case CONTROLVM_DEVICE_CONFIGURE:
- /* no-op; just send a response indicating success */
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
- NULL);
- break;
- case CONTROLVM_CHIPSET_READY:
- err = chipset_ready_uevent(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_SELFTEST:
- err = chipset_selftest_uevent(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_STOP:
- err = chipset_notready_uevent(&inmsg.hdr);
- break;
- default:
- err = -ENOMSG;
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr,
- -CONTROLVM_RESP_ID_UNKNOWN, NULL);
- break;
- }
- if (parser_ctx) {
- parser_done(parser_ctx);
- parser_ctx = NULL;
- }
- return err;
-}
-
-/*
- * read_controlvm_event() - retrieves the next message from the
- * CONTROLVM_QUEUE_EVENT queue in the controlvm
- * channel
- * @msg: pointer to the retrieved message
- *
- * Return: 0 if a valid message was retrieved, or a negative error code
- */
-static int read_controlvm_event(struct controlvm_message *msg)
-{
- int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg);
-
- if (err)
- return err;
- /* got a message */
- if (msg->hdr.flags.test_message == 1)
- return -EINVAL;
- return 0;
-}
-
-/*
- * parahotplug_process_list() - remove any request that has been on the list
- * too long and respond with an error
- */
-static void parahotplug_process_list(void)
-{
- struct list_head *pos;
- struct list_head *tmp;
-
- spin_lock(&parahotplug_request_list_lock);
- list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- struct parahotplug_request *req =
- list_entry(pos, struct parahotplug_request, list);
-
- if (!time_after_eq(jiffies, req->expiration))
- continue;
- list_del(pos);
- if (req->msg.hdr.flags.response_expected)
- controlvm_respond(
- &req->msg.hdr,
- CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
- &req->msg.cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- }
- spin_unlock(&parahotplug_request_list_lock);
-}
-
-static void controlvm_periodic_work(struct work_struct *work)
-{
- struct controlvm_message inmsg;
- int count = 0;
- int err;
-
- /* Drain the RESPONSE queue until it is empty */
- do {
- err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_RESPONSE,
- &inmsg);
- } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
- if (err != -EAGAIN)
- goto schedule_out;
- if (chipset_dev->controlvm_pending_msg_valid) {
- /*
- * we throttled processing of a prior msg, so try to process
- * it again rather than reading a new one
- */
- inmsg = chipset_dev->controlvm_pending_msg;
- chipset_dev->controlvm_pending_msg_valid = false;
- err = 0;
- } else {
- err = read_controlvm_event(&inmsg);
- }
- while (!err) {
- chipset_dev->most_recent_message_jiffies = jiffies;
- err = handle_command(inmsg,
- visorchannel_get_physaddr
- (chipset_dev->controlvm_channel));
- if (err == -EAGAIN) {
- chipset_dev->controlvm_pending_msg = inmsg;
- chipset_dev->controlvm_pending_msg_valid = true;
- break;
- }
-
- err = read_controlvm_event(&inmsg);
- }
- /* parahotplug_worker */
- parahotplug_process_list();
-
-/*
- * The controlvm messages are sent in bulk. If we start receiving messages, we
- * want the polling to be fast. If we do not receive any message for
- * MIN_IDLE_SECONDS, we can slow down the polling.
- */
-schedule_out:
- if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
- (HZ * MIN_IDLE_SECONDS))) {
- /*
- * it's been longer than MIN_IDLE_SECONDS since we processed
- * our last controlvm message; slow down the polling
- */
- if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
- } else {
- if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- }
- schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
- chipset_dev->poll_jiffies);
-}
-
-static int visorchipset_init(struct acpi_device *acpi_device)
-{
- int err = -ENOMEM;
- struct visorchannel *controlvm_channel;
-
- chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
- if (!chipset_dev)
- goto error;
- err = controlvm_channel_create(chipset_dev);
- if (err)
- goto error_free_chipset_dev;
- acpi_device->driver_data = chipset_dev;
- chipset_dev->acpi_device = acpi_device;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
- if (err < 0)
- goto error_destroy_channel;
- controlvm_channel = chipset_dev->controlvm_channel;
- if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
- &chipset_dev->acpi_device->dev,
- &visor_controlvm_channel_guid,
- "controlvm",
- sizeof(struct visor_controlvm_channel),
- VISOR_CONTROLVM_CHANNEL_VERSIONID,
- VISOR_CHANNEL_SIGNATURE)) {
- err = -ENODEV;
- goto error_delete_groups;
- }
- /* if booting in a crash kernel */
- if (is_kdump_kernel())
- INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
- setup_crash_devices_work_queue);
- else
- INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
- controlvm_periodic_work);
- chipset_dev->most_recent_message_jiffies = jiffies;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
- chipset_dev->poll_jiffies);
- err = visorbus_init();
- if (err < 0)
- goto error_cancel_work;
- return 0;
-
-error_cancel_work:
- cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
-
-error_delete_groups:
- sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
-
-error_destroy_channel:
- visorchannel_destroy(chipset_dev->controlvm_channel);
-
-error_free_chipset_dev:
- kfree(chipset_dev);
-
-error:
- dev_err(&acpi_device->dev, "failed with error %d\n", err);
- return err;
-}
-
-static int visorchipset_exit(struct acpi_device *acpi_device)
-{
- visorbus_exit();
- cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
- sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
- visorchannel_destroy(chipset_dev->controlvm_channel);
- kfree(chipset_dev);
- return 0;
-}
-
-static const struct acpi_device_id unisys_device_ids[] = {
- {"PNP0A07", 0},
- {"", 0},
-};
-
-static struct acpi_driver unisys_acpi_driver = {
- .name = "unisys_acpi",
- .class = "unisys_acpi_class",
- .owner = THIS_MODULE,
- .ids = unisys_device_ids,
- .ops = {
- .add = visorchipset_init,
- .remove = visorchipset_exit,
- },
-};
-
-MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-
-static __init int visorutil_spar_detect(void)
-{
- unsigned int eax, ebx, ecx, edx;
-
- if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
- /* check the ID */
- cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
- return (ebx == UNISYS_VISOR_ID_EBX) &&
- (ecx == UNISYS_VISOR_ID_ECX) &&
- (edx == UNISYS_VISOR_ID_EDX);
- }
- return 0;
-}
-
-static int __init init_unisys(void)
-{
- int result;
-
- if (!visorutil_spar_detect())
- return -ENODEV;
- result = acpi_bus_register_driver(&unisys_acpi_driver);
- if (result)
- return -ENODEV;
- pr_info("Unisys Visorchipset Driver Loaded.\n");
- return 0;
-}
-
-static void __exit exit_unisys(void)
-{
- acpi_bus_unregister_driver(&unisys_acpi_driver);
-}
-
-module_init(init_unisys);
-module_exit(exit_unisys);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
index 936392ca3c8c..c13dd9d2a604 100644
--- a/drivers/vme/Kconfig
+++ b/drivers/vme/Kconfig
@@ -15,6 +15,4 @@ source "drivers/vme/bridges/Kconfig"
source "drivers/vme/boards/Kconfig"
-source "drivers/staging/vme/devices/Kconfig"
-
endif # VME
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index f6664fc9596a..0eb560fc0153 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -172,8 +172,9 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send command control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send command control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
@@ -187,8 +188,9 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send mode control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send mode control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
@@ -202,72 +204,68 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
return err;
}
-static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
-{
- pr_info("%45s: %8x\n", str, buf[off]);
-}
-
-static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
+static void ds_dump_status(struct ds_device *ds_dev, unsigned char *buf, int count)
{
+ struct device *dev = &ds_dev->udev->dev;
int i;
- dev_info(&dev->udev->dev, "ep_status=0x%x, count=%d, status=%*phC",
- dev->ep[EP_STATUS], count, count, buf);
+ dev_info(dev, "ep_status=0x%x, count=%d, status=%*phC",
+ ds_dev->ep[EP_STATUS], count, count, buf);
if (count >= 16) {
- ds_print_msg(buf, "enable flag", 0);
- ds_print_msg(buf, "1-wire speed", 1);
- ds_print_msg(buf, "strong pullup duration", 2);
- ds_print_msg(buf, "programming pulse duration", 3);
- ds_print_msg(buf, "pulldown slew rate control", 4);
- ds_print_msg(buf, "write-1 low time", 5);
- ds_print_msg(buf, "data sample offset/write-0 recovery time",
- 6);
- ds_print_msg(buf, "reserved (test register)", 7);
- ds_print_msg(buf, "device status flags", 8);
- ds_print_msg(buf, "communication command byte 1", 9);
- ds_print_msg(buf, "communication command byte 2", 10);
- ds_print_msg(buf, "communication command buffer status", 11);
- ds_print_msg(buf, "1-wire data output buffer status", 12);
- ds_print_msg(buf, "1-wire data input buffer status", 13);
- ds_print_msg(buf, "reserved", 14);
- ds_print_msg(buf, "reserved", 15);
+ dev_dbg(dev, "enable flag: 0x%02x", buf[0]);
+ dev_dbg(dev, "1-wire speed: 0x%02x", buf[1]);
+ dev_dbg(dev, "strong pullup duration: 0x%02x", buf[2]);
+ dev_dbg(dev, "programming pulse duration: 0x%02x", buf[3]);
+ dev_dbg(dev, "pulldown slew rate control: 0x%02x", buf[4]);
+ dev_dbg(dev, "write-1 low time: 0x%02x", buf[5]);
+ dev_dbg(dev, "data sample offset/write-0 recovery time: 0x%02x", buf[6]);
+ dev_dbg(dev, "reserved (test register): 0x%02x", buf[7]);
+ dev_dbg(dev, "device status flags: 0x%02x", buf[8]);
+ dev_dbg(dev, "communication command byte 1: 0x%02x", buf[9]);
+ dev_dbg(dev, "communication command byte 2: 0x%02x", buf[10]);
+ dev_dbg(dev, "communication command buffer status: 0x%02x", buf[11]);
+ dev_dbg(dev, "1-wire data output buffer status: 0x%02x", buf[12]);
+ dev_dbg(dev, "1-wire data input buffer status: 0x%02x", buf[13]);
+ dev_dbg(dev, "reserved: 0x%02x", buf[14]);
+ dev_dbg(dev, "reserved: 0x%02x", buf[15]);
}
+
for (i = 16; i < count; ++i) {
if (buf[i] == RR_DETECT) {
- ds_print_msg(buf, "new device detect", i);
+ dev_dbg(dev, "New device detect.\n");
continue;
}
- ds_print_msg(buf, "Result Register Value: ", i);
+ dev_dbg(dev, "Result Register Value: 0x%02x", buf[i]);
if (buf[i] & RR_NRS)
- pr_info("NRS: Reset no presence or ...\n");
+ dev_dbg(dev, "NRS: Reset no presence or ...\n");
if (buf[i] & RR_SH)
- pr_info("SH: short on reset or set path\n");
+ dev_dbg(dev, "SH: short on reset or set path\n");
if (buf[i] & RR_APP)
- pr_info("APP: alarming presence on reset\n");
+ dev_dbg(dev, "APP: alarming presence on reset\n");
if (buf[i] & RR_VPP)
- pr_info("VPP: 12V expected not seen\n");
+ dev_dbg(dev, "VPP: 12V expected not seen\n");
if (buf[i] & RR_CMP)
- pr_info("CMP: compare error\n");
+ dev_dbg(dev, "CMP: compare error\n");
if (buf[i] & RR_CRC)
- pr_info("CRC: CRC error detected\n");
+ dev_dbg(dev, "CRC: CRC error detected\n");
if (buf[i] & RR_RDP)
- pr_info("RDP: redirected page\n");
+ dev_dbg(dev, "RDP: redirected page\n");
if (buf[i] & RR_EOS)
- pr_info("EOS: end of search error\n");
+ dev_dbg(dev, "EOS: end of search error\n");
}
}
-static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
- bool dump)
+static int ds_recv_status(struct ds_device *dev, struct ds_status *st)
{
int count, err;
@@ -281,14 +279,12 @@ static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
dev->st_buf, sizeof(dev->st_buf),
&count, 1000);
if (err < 0) {
- pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
- dev->ep[EP_STATUS], err);
+ dev_err(&dev->udev->dev,
+ "Failed to read 1-wire data from 0x%x: err=%d.\n",
+ dev->ep[EP_STATUS], err);
return err;
}
- if (dump)
- ds_dump_status(dev, dev->st_buf, count);
-
if (st && count >= sizeof(*st))
memcpy(st, dev->st_buf, sizeof(*st));
@@ -302,13 +298,15 @@ static void ds_reset_device(struct ds_device *dev)
* the strong pullup.
*/
if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
- pr_err("ds_reset_device: Error allowing strong pullup\n");
+ dev_err(&dev->udev->dev,
+ "%s: Error allowing strong pullup\n", __func__);
/* Chip strong pullup time was cleared. */
if (dev->spu_sleep) {
/* lower 4 bits are 0, see ds_set_pullup */
u8 del = dev->spu_sleep>>4;
if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
- pr_err("ds_reset_device: Error setting duration\n");
+ dev_err(&dev->udev->dev,
+ "%s: Error setting duration\n", __func__);
}
}
@@ -329,9 +327,16 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
+ int recv_len;
+
dev_info(&dev->udev->dev, "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
- ds_recv_status(dev, NULL, true);
+
+ /* The status may tell us why the endpoint is stuck. */
+ recv_len = ds_recv_status(dev, NULL);
+ if (recv_len >= 0)
+ ds_dump_status(dev, dev->st_buf, recv_len);
+
return err;
}
@@ -355,7 +360,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
count = 0;
err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
if (err < 0) {
- pr_err("Failed to write 1-wire data to ep0x%x: "
+ dev_err(&dev->udev->dev, "Failed to write 1-wire data to ep0x%x: "
"err=%d.\n", dev->ep[EP_DATA_OUT], err);
return err;
}
@@ -377,7 +382,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
err = ds_send_control(dev, CTL_RESUME_EXE, 0);
if (err)
break;
- err = ds_recv_status(dev, &st, false);
+ err = ds_recv_status(dev, &st);
if (err)
break;
@@ -424,7 +429,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
do {
st->status = 0;
- err = ds_recv_status(dev, st, false);
+ err = ds_recv_status(dev, st);
#if 0
if (err >= 0) {
int i;
@@ -437,7 +442,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
} while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
- pr_info("Resetting device after ST_EPOF.\n");
+ dev_info(&dev->udev->dev, "Resetting device after ST_EPOF.\n");
ds_reset_device(dev);
/* Always dump the device status. */
count = 101;
@@ -721,7 +726,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
do {
schedule_timeout(jtime);
- err = ds_recv_status(dev, &st, false);
+ err = ds_recv_status(dev, &st);
if (err < 0 || err < sizeof(st))
break;
@@ -992,10 +997,9 @@ static int ds_probe(struct usb_interface *intf,
int i, err, alt;
dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
- if (!dev) {
- pr_info("Failed to allocate new DS9490R structure.\n");
+ if (!dev)
return -ENOMEM;
- }
+
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
@@ -1025,7 +1029,7 @@ static int ds_probe(struct usb_interface *intf,
iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
- pr_info("Num endpoints=%d. It is not DS9490R.\n",
+ dev_err(&dev->udev->dev, "Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
err = -EINVAL;
goto err_out_clear;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 001ae1be9b61..32fd37698932 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1839,6 +1839,17 @@ config RALINK_WDT
help
Hardware driver for the Ralink SoC Watchdog Timer.
+config GXP_WATCHDOG
+ tristate "HPE GXP watchdog support"
+ depends on ARCH_HPE_GXP
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in HPE GXP SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called gxp-wdt.
+
config MT7621_WDT
tristate "Mediatek SoC watchdog"
select WATCHDOG_CORE
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 5f88a6237f7c..c324e9d820e9 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
+obj-$(CONFIG_GXP_WATCHDOG) += gxp-wdt.o
obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c
new file mode 100644
index 000000000000..b0b2d7a6fdde
--- /dev/null
+++ b/drivers/watchdog/gxp-wdt.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define MASK_WDGCS_ENABLE 0x01
+#define MASK_WDGCS_RELOAD 0x04
+#define MASK_WDGCS_NMIEN 0x08
+#define MASK_WDGCS_WARN 0x80
+
+#define WDT_MAX_TIMEOUT_MS 655350
+#define WDT_DEFAULT_TIMEOUT 30
+#define SECS_TO_WDOG_TICKS(x) ((x) * 100)
+#define WDOG_TICKS_TO_SECS(x) ((x) / 100)
+
+#define GXP_WDT_CNT_OFS 0x10
+#define GXP_WDT_CTRL_OFS 0x16
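A quick consistency check on the constants above (illustrative, not part of the driver; it assumes the 10 ms tick implied by the conversion macros and the 16-bit count register implied by the writew()/readw() accesses below):

	#include <linux/build_bug.h>

	/* 65535 ticks * 10 ms/tick = 655350 ms, and 1 s = 100 ticks. */
	static_assert(0xffff * 10 == WDT_MAX_TIMEOUT_MS);
	static_assert(SECS_TO_WDOG_TICKS(1) == 100);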
+
+struct gxp_wdt {
+ void __iomem *base;
+ struct watchdog_device wdd;
+};
+
+static void gxp_wdt_enable_reload(struct gxp_wdt *drvdata)
+{
+ u8 val;
+
+ val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+ val |= (MASK_WDGCS_ENABLE | MASK_WDGCS_RELOAD);
+ writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+}
+
+static int gxp_wdt_start(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ writew(SECS_TO_WDOG_TICKS(wdd->timeout), drvdata->base + GXP_WDT_CNT_OFS);
+ gxp_wdt_enable_reload(drvdata);
+ return 0;
+}
+
+static int gxp_wdt_stop(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u8 val;
+
+ val = readb_relaxed(drvdata->base + GXP_WDT_CTRL_OFS);
+ val &= ~MASK_WDGCS_ENABLE;
+ writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+ return 0;
+}
+
+static int gxp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u32 actual;
+
+ wdd->timeout = timeout;
+ actual = min(timeout * 100, wdd->max_hw_heartbeat_ms / 10);
+ writew(actual, drvdata->base + GXP_WDT_CNT_OFS);
+
+ return 0;
+}
+
+static unsigned int gxp_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u32 val = readw(drvdata->base + GXP_WDT_CNT_OFS);
+
+ return WDOG_TICKS_TO_SECS(val);
+}
+
+static int gxp_wdt_ping(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ gxp_wdt_enable_reload(drvdata);
+ return 0;
+}
+
+static int gxp_restart(struct watchdog_device *wdd, unsigned long action,
+ void *data)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ writew(1, drvdata->base + GXP_WDT_CNT_OFS);
+ gxp_wdt_enable_reload(drvdata);
+ mdelay(100);
+ return 0;
+}
+
+static const struct watchdog_ops gxp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gxp_wdt_start,
+ .stop = gxp_wdt_stop,
+ .ping = gxp_wdt_ping,
+ .set_timeout = gxp_wdt_set_timeout,
+ .get_timeleft = gxp_wdt_get_timeleft,
+ .restart = gxp_restart,
+};
+
+static const struct watchdog_info gxp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "HPE GXP Watchdog timer",
+};
+
+static int gxp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gxp_wdt *drvdata;
+ int err;
+ u8 val;
+
+ drvdata = devm_kzalloc(dev, sizeof(struct gxp_wdt), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ /*
+ * The register area where the timer and watchdog reside is disarranged,
+ * so mapping individual register blocks for the timer and the watchdog
+ * is not recommended, as each would have access to the other's
+ * registers. Based on feedback, the watchdog is no longer part of the
+ * device tree file and the timer driver now creates the watchdog as a
+ * child device. During the watchdog's creation, the timer driver passes
+ * the base address to the watchdog over the private interface.
+ */
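A minimal sketch of the parent side of that handoff (illustrative only; the timer device and variable names are hypothetical, but the key point, taken from the assignment just below, is that platform_data carries the raw register base itself):

	struct platform_device *wdt_pdev;

	wdt_pdev = platform_device_alloc("gxp-wdt", -1);
	if (wdt_pdev) {
		wdt_pdev->dev.parent = timer_dev;		/* hypothetical timer device */
		wdt_pdev->dev.platform_data = (void *)wdt_base;	/* shared register base */
		if (platform_device_add(wdt_pdev))
			platform_device_put(wdt_pdev);
	}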
+
+ drvdata->base = (void __iomem *)dev->platform_data;
+
+ drvdata->wdd.info = &gxp_wdt_info;
+ drvdata->wdd.ops = &gxp_wdt_ops;
+ drvdata->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+ drvdata->wdd.parent = dev;
+ drvdata->wdd.timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&drvdata->wdd, drvdata);
+ watchdog_set_nowayout(&drvdata->wdd, WATCHDOG_NOWAYOUT);
+
+ val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+
+ if (val & MASK_WDGCS_ENABLE)
+ set_bit(WDOG_HW_RUNNING, &drvdata->wdd.status);
+
+ watchdog_set_restart_priority(&drvdata->wdd, 128);
+
+ watchdog_stop_on_reboot(&drvdata->wdd);
+ err = devm_watchdog_register_device(dev, &drvdata->wdd);
+ if (err) {
+ dev_err(dev, "Failed to register watchdog device");
+ return err;
+ }
+
+ dev_info(dev, "HPE GXP watchdog timer");
+
+ return 0;
+}
+
+static struct platform_driver gxp_wdt_driver = {
+ .probe = gxp_wdt_probe,
+ .driver = {
+ .name = "gxp-wdt",
+ },
+};
+module_platform_driver(gxp_wdt_driver);
+
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>");
+MODULE_DESCRIPTION("Driver for GXP watchdog timer");
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 27846c6bdfb0..2d0a06a158a8 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
@@ -30,17 +31,42 @@
#include <linux/uaccess.h>
#include <linux/timex.h>
-#ifdef CONFIG_ARCH_PXA
-#include <mach/regs-ost.h>
-#endif
+#define REG_OSMR0 0x0000 /* OS timer Match Reg. 0 */
+#define REG_OSMR1 0x0004 /* OS timer Match Reg. 1 */
+#define REG_OSMR2 0x0008 /* OS timer Match Reg. 2 */
+#define REG_OSMR3 0x000c /* OS timer Match Reg. 3 */
+#define REG_OSCR 0x0010 /* OS timer Counter Reg. */
+#define REG_OSSR 0x0014 /* OS timer Status Reg. */
+#define REG_OWER 0x0018 /* OS timer Watch-dog Enable Reg. */
+#define REG_OIER 0x001C /* OS timer Interrupt Enable Reg. */
-#include <mach/reset.h>
-#include <mach/hardware.h>
+#define OSSR_M3 (1 << 3) /* Match status channel 3 */
+#define OSSR_M2 (1 << 2) /* Match status channel 2 */
+#define OSSR_M1 (1 << 1) /* Match status channel 1 */
+#define OSSR_M0 (1 << 0) /* Match status channel 0 */
+
+#define OWER_WME (1 << 0) /* Watchdog Match Enable */
+
+#define OIER_E3 (1 << 3) /* Interrupt enable channel 3 */
+#define OIER_E2 (1 << 2) /* Interrupt enable channel 2 */
+#define OIER_E1 (1 << 1) /* Interrupt enable channel 1 */
+#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
static unsigned long oscr_freq;
static unsigned long sa1100wdt_users;
static unsigned int pre_margin;
static int boot_status;
+static void __iomem *reg_base;
+
+static inline void sa1100_wr(u32 val, u32 offset)
+{
+ writel_relaxed(val, reg_base + offset);
+}
+
+static inline u32 sa1100_rd(u32 offset)
+{
+ return readl_relaxed(reg_base + offset);
+}
/*
* Allow only one person to hold it open
@@ -51,10 +77,10 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
return -EBUSY;
/* Activate SA1100 Watchdog timer */
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
- writel_relaxed(OSSR_M3, OSSR);
- writel_relaxed(OWER_WME, OWER);
- writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
+ sa1100_wr(OSSR_M3, REG_OSSR);
+ sa1100_wr(OWER_WME, REG_OWER);
+ sa1100_wr(sa1100_rd(REG_OIER) | OIER_E3, REG_OIER);
return stream_open(inode, file);
}
@@ -62,7 +88,7 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
* The watchdog cannot be disabled.
*
* Previous comments suggested that turning off the interrupt by
- * clearing OIER[E3] would prevent the watchdog timing out but this
+ * clearing REG_OIER[E3] would prevent the watchdog timing out but this
* does not appear to be true (at least on the PXA255).
*/
static int sa1100dog_release(struct inode *inode, struct file *file)
@@ -77,7 +103,7 @@ static ssize_t sa1100dog_write(struct file *file, const char __user *data,
{
if (len)
/* Refresh OSMR3 timer. */
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
return len;
}
@@ -111,7 +137,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_KEEPALIVE:
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
ret = 0;
break;
@@ -126,7 +152,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
}
pre_margin = oscr_freq * time;
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
fallthrough;
case WDIOC_GETTIMEOUT:
@@ -152,12 +178,22 @@ static struct miscdevice sa1100dog_miscdev = {
.fops = &sa1100dog_fops,
};
-static int margin __initdata = 60; /* (secs) Default is 1 minute */
+static int margin = 60; /* (secs) Default is 1 minute */
static struct clk *clk;
-static int __init sa1100dog_init(void)
+static int sa1100dog_probe(struct platform_device *pdev)
{
int ret;
+ int *platform_data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!reg_base)
+ return -ENOMEM;
clk = clk_get(NULL, "OSTIMER0");
if (IS_ERR(clk)) {
@@ -175,13 +211,9 @@ static int __init sa1100dog_init(void)
oscr_freq = clk_get_rate(clk);
- /*
- * Read the reset status, and save it for later. If
- * we suspend, RCSR will be cleared, and the watchdog
- * reset reason will be lost.
- */
- boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
- WDIOF_CARDRESET : 0;
+ platform_data = pdev->dev.platform_data;
+ if (platform_data && *platform_data)
+ boot_status = WDIOF_CARDRESET;
pre_margin = oscr_freq * margin;
ret = misc_register(&sa1100dog_miscdev);
@@ -197,15 +229,21 @@ err:
return ret;
}
-static void __exit sa1100dog_exit(void)
+static int sa1100dog_remove(struct platform_device *pdev)
{
misc_deregister(&sa1100dog_miscdev);
clk_disable_unprepare(clk);
clk_put(clk);
+
+ return 0;
}
-module_init(sa1100dog_init);
-module_exit(sa1100dog_exit);
+struct platform_driver sa1100dog_driver = {
+ .driver.name = "sa1100_wdt",
+ .probe = sa1100dog_probe,
+ .remove = sa1100dog_remove,
+};
+module_platform_driver(sa1100dog_driver);
MODULE_AUTHOR("Oleg Drokin <green@crimea.edu>");
MODULE_DESCRIPTION("SA1100/PXA2xx Watchdog");
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 55acb32842a3..a15729beb9d1 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -175,8 +175,6 @@ undo:
static void __del_gref(struct gntalloc_gref *gref)
{
- unsigned long addr;
-
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
uint8_t *tmp = kmap_local_page(gref->page);
tmp[gref->notify.pgoff] = 0;
@@ -190,10 +188,9 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;
if (gref->gref_id) {
- if (gref->page) {
- addr = (unsigned long)page_to_virt(gref->page);
- gnttab_end_foreign_access(gref->gref_id, addr);
- } else
+ if (gref->page)
+ gnttab_end_foreign_access(gref->gref_id, gref->page);
+ else
gnttab_free_grant_reference(gref->gref_id);
}
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 91073b4e4a20..940e5e9e8a54 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -524,7 +524,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
for (i = 0; i < count; i++)
if (refs[i] != INVALID_GRANT_REF)
- gnttab_end_foreign_access(refs[i], 0UL);
+ gnttab_end_foreign_access(refs[i], NULL);
}
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 1a1aec0a88a1..7a18292540bc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -430,13 +430,13 @@ int gnttab_try_end_foreign_access(grant_ref_t ref)
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
if (gnttab_try_end_foreign_access(ref)) {
- if (page != 0)
- put_page(virt_to_page(page));
+ if (page)
+ put_page(page);
} else
- gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
+ gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
@@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
if (xen_auto_xlat_grant_frames.count)
return -EINVAL;
- vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+ vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
if (vaddr == NULL) {
pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
&addr);
@@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
}
pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
if (!pfn) {
- xen_unmap(vaddr);
+ memunmap(vaddr);
return -ENOMEM;
}
for (i = 0; i < max_nr_gframes; i++)
@@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
if (!xen_auto_xlat_grant_frames.count)
return;
kfree(xen_auto_xlat_grant_frames.pfn);
- xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+ memunmap(xen_auto_xlat_grant_frames.vaddr);
xen_auto_xlat_grant_frames.pfn = NULL;
xen_auto_xlat_grant_frames.count = 0;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index e254ed19488f..1826e8e67125 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -238,8 +238,8 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
spin_unlock(&bedata->socket_lock);
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
- gnttab_end_foreign_access(map->active.ring->ref[i], 0);
- gnttab_end_foreign_access(map->active.ref, 0);
+ gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+ gnttab_end_foreign_access(map->active.ref, NULL);
free_page((unsigned long)map->active.ring);
kfree(map);
@@ -1117,7 +1117,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
}
}
if (bedata->ref != -1)
- gnttab_end_foreign_access(bedata->ref, 0);
+ gnttab_end_foreign_access(bedata->ref, NULL);
kfree(bedata->ring.sring);
kfree(bedata);
xenbus_switch_state(dev, XenbusStateClosed);
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index b6433761d42c..bef8d72a6ca6 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -135,7 +135,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != INVALID_GRANT_REF)
- gnttab_end_foreign_access(buf->grefs[i], 0UL);
+ gnttab_end_foreign_access(buf->grefs[i], NULL);
}
kfree(buf->grefs);
kfree(buf->directory);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d6fdd2d209d3..d5f3f763717e 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -439,7 +439,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
for (i = 0; i < nr_pages; i++) {
if (grefs[i] != INVALID_GRANT_REF) {
- gnttab_end_foreign_access(grefs[i], 0);
+ gnttab_end_foreign_access(grefs[i], NULL);
grefs[i] = INVALID_GRANT_REF;
}
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index d367f2bd2b93..58b732dcbfb8 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -752,8 +752,8 @@ static void xenbus_probe(void)
xenstored_ready = 1;
if (!xen_store_interface) {
- xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
/*
* Now it is safe to free the IRQ used for xenstore late
* initialization. No need to unbind: it is about to be
@@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
if (xen_store_interface->connection != XENSTORE_CONNECTED)
wait = true;
}