author     Olof Johansson <olof@lixom.net>  2020-01-06 09:33:53 -0800
committer  Olof Johansson <olof@lixom.net>  2020-01-06 09:33:53 -0800
commit     4081b335592a46d506e9ab5725ba8d7cd8cea1d9 (patch)
tree       3aed3118e0e16349fdc2c7619046bf3854270c3c
parent     Merge tag 'renesas-dt-bindings-for-v5.6-tag1' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel into arm/dt (diff)
parent     ARM: dts: mmp3-dell-ariel: Enable the HSIC (diff)
download   linux-dev-4081b335592a46d506e9ab5725ba8d7cd8cea1d9.tar.xz
           linux-dev-4081b335592a46d506e9ab5725ba8d7cd8cea1d9.zip
Merge branch 'mmp/hsic' into arm/dt
* mmp/hsic:
  ARM: dts: mmp3-dell-ariel: Enable the HSIC
  ARM: dts: mmp3: Add HSIC controllers
  dt-bindings: phy: Add binding for marvell,mmp3-hsic-phy
  clk: mmp2: Add HSIC clocks
  dt-bindings: marvell,mmp2: Add clock ids for the HSIC clocks

+ Linux 5.5-rc2
-rw-r--r--.mailmap2
-rw-r--r--Documentation/admin-guide/device-mapper/dm-integrity.rst2
-rw-r--r--Documentation/admin-guide/device-mapper/index.rst1
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml2
-rw-r--r--Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml2
-rw-r--r--Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml2
-rw-r--r--Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml3
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml9
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml3
-rw-r--r--Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml22
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml2
-rw-r--r--Documentation/filesystems/erofs.txt27
-rw-r--r--Documentation/filesystems/overlayfs.rst (renamed from Documentation/filesystems/overlayfs.txt)10
-rw-r--r--Documentation/process/coding-style.rst2
-rw-r--r--Documentation/scsi/smartpqi.txt2
-rw-r--r--Documentation/translations/it_IT/process/coding-style.rst2
-rw-r--r--Documentation/translations/zh_CN/process/coding-style.rst2
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile2
-rw-r--r--arch/arc/kernel/unwind.c6
-rw-r--r--arch/arm/boot/dts/mmp3-dell-ariel.dts22
-rw-r--r--arch/arm/boot/dts/mmp3.dtsi44
-rw-r--r--arch/arm/crypto/curve25519-glue.c7
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c9
-rw-r--r--arch/nios2/mm/ioremap.c8
-rw-r--r--arch/powerpc/net/bpf_jit32.h4
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c16
-rw-r--r--arch/riscv/Kconfig.socs4
-rw-r--r--arch/riscv/boot/Makefile2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/include/asm/uv.h2
-rw-r--r--arch/s390/kernel/early.c16
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c22
-rw-r--r--arch/s390/kernel/smp.c13
-rw-r--r--arch/s390/lib/spinlock.c1
-rw-r--r--arch/s390/lib/test_unwind.c2
-rw-r--r--arch/s390/mm/kasan_init.c68
-rw-r--r--arch/sh/drivers/platform_early.c11
-rw-r--r--arch/sh/kernel/kgdb.c1
-rw-r--r--arch/sparc/net/bpf_jit_comp_32.c8
-rw-r--r--arch/x86/kernel/fpu/xstate.c2
-rw-r--r--arch/x86/kernel/ftrace.c14
-rw-r--r--block/bio.c4
-rw-r--r--block/blk-cgroup.c20
-rw-r--r--block/blk-core.c9
-rw-r--r--crypto/adiantum.c4
-rw-r--r--crypto/essiv.c2
-rw-r--r--drivers/acpi/device_pm.c12
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/base/devtmpfs.c6
-rw-r--r--drivers/base/platform.c4
-rw-r--r--drivers/block/xen-blkback/xenbus.c10
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c6
-rw-r--r--drivers/cpuidle/cpuidle.c3
-rw-r--r--drivers/cpuidle/driver.c10
-rw-r--r--drivers/devfreq/devfreq.c273
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c32
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c51
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c26
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c204
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c6
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c48
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c108
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h116
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c20
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c23
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.h2
-rw-r--r--drivers/i2c/i2c-core-base.c23
-rw-r--r--drivers/iio/accel/st_accel_core.c8
-rw-r--r--drivers/iio/adc/ad7124.c7
-rw-r--r--drivers/iio/adc/ad7606.c2
-rw-r--r--drivers/iio/adc/ad7949.c22
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c2
-rw-r--r--drivers/iio/adc/max1027.c8
-rw-r--r--drivers/iio/adc/max9611.c16
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c23
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h16
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c50
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c34
-rw-r--r--drivers/iio/temperature/ltc2983.c6
-rw-r--r--drivers/infiniband/core/cma.c1
-rw-r--r--drivers/infiniband/core/counters.c3
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c48
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c9
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c16
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c133
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c7
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig14
-rw-r--r--drivers/interconnect/qcom/msm8974.c8
-rw-r--r--drivers/interconnect/qcom/qcs404.c8
-rw-r--r--drivers/interconnect/qcom/sdm845.c4
-rw-r--r--drivers/md/dm-clone-metadata.c136
-rw-r--r--drivers/md/dm-clone-metadata.h17
-rw-r--r--drivers/md/dm-clone-target.c53
-rw-r--r--drivers/md/dm-mpath.c37
-rw-r--r--drivers/md/dm-thin-metadata.c29
-rw-r--r--drivers/md/dm-thin-metadata.h7
-rw-r--r--drivers/md/dm-thin.c42
-rw-r--r--drivers/md/md.c1
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c8
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c24
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c16
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c32
-rw-r--r--drivers/net/fjes/fjes_ethtool.c2
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h4
-rw-r--r--drivers/nvme/host/core.c6
-rw-r--r--drivers/nvme/host/fc.c40
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c23
-rw-r--r--drivers/nvme/host/rdma.c10
-rw-r--r--drivers/nvme/target/fcloop.c1
-rw-r--r--drivers/nvme/target/loop.c8
-rw-r--r--drivers/of/platform.c6
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h10
-rw-r--r--drivers/scsi/aacraid/aachba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_discover.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c6
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c2
-rw-r--r--drivers/staging/exfat/exfat.h4
-rw-r--r--drivers/staging/exfat/exfat_core.c10
-rw-r--r--drivers/staging/exfat/exfat_super.c4
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c12
-rw-r--r--drivers/staging/fbtft/fb_watterott.c13
-rw-r--r--drivers/staging/fbtft/fbtft-core.c6
-rw-r--r--drivers/staging/hp/Kconfig1
-rw-r--r--drivers/staging/isdn/gigaset/usb-gigaset.c23
-rw-r--r--drivers/staging/octeon/Kconfig1
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c2
-rw-r--r--drivers/staging/wfx/data_tx.c2
-rw-r--r--drivers/staging/wlan-ng/Kconfig1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c2
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/usb/atm/ueagle-atm.c18
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/common/usb-conn-gpio.c3
-rw-r--r--drivers/usb/core/hcd.c42
-rw-r--r--drivers/usb/core/urb.c1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c6
-rw-r--r--drivers/usb/dwc3/ep0.c8
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/function/f_ecm.c6
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c1
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-pci.c13
-rw-r--r--drivers/usb/host/xhci-ring.c6
-rw-r--r--drivers/usb/host/xhci.c9
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/mon/mon_bin.c32
-rw-r--r--drivers/usb/roles/class.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c10
-rw-r--r--drivers/usb/storage/scsiglue.c3
-rw-r--r--drivers/usb/typec/class.c6
-rw-r--r--drivers/virtio/virtio_balloon.c36
-rw-r--r--drivers/xen/balloon.c3
-rw-r--r--fs/afs/dynroot.c3
-rw-r--r--fs/afs/mntpt.c6
-rw-r--r--fs/afs/proc.c7
-rw-r--r--fs/afs/server.c21
-rw-r--r--fs/afs/super.c2
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/ceph/caps.c41
-rw-r--r--fs/ceph/debugfs.c13
-rw-r--r--fs/ceph/mds_client.c8
-rw-r--r--fs/ceph/mds_client.h9
-rw-r--r--fs/ceph/mdsmap.c12
-rw-r--r--fs/ceph/super.c28
-rw-r--r--fs/ceph/super.h16
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/smb2inode.c1
-rw-r--r--fs/cifs/smb2ops.c19
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/cifs/smb2proto.h2
-rw-r--r--fs/crypto/keyring.c2
-rw-r--r--fs/erofs/xattr.c2
-rw-r--r--fs/file.c7
-rw-r--r--fs/io-wq.c34
-rw-r--r--fs/io-wq.h7
-rw-r--r--fs/io_uring.c168
-rw-r--r--fs/namespace.c10
-rw-r--r--fs/overlayfs/copy_up.c53
-rw-r--r--fs/overlayfs/dir.c2
-rw-r--r--fs/overlayfs/export.c80
-rw-r--r--fs/overlayfs/inode.c8
-rw-r--r--fs/overlayfs/namei.c52
-rw-r--r--fs/overlayfs/overlayfs.h34
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/super.c24
-rw-r--r--fs/pipe.c36
-rw-r--r--fs/verity/enable.c2
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h2
-rw-r--r--include/linux/blk-cgroup.h2
-rw-r--r--include/linux/devfreq.h14
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/filter.h12
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/i2c.h12
-rw-r--r--include/linux/initrd.h2
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/nvme-fc-driver.h4
-rw-r--r--include/linux/phy_led_triggers.h2
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/mrp.h2
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h2
-rw-r--r--include/net/netfilter/nf_tables_core.h2
-rw-r--r--include/net/sock.h2
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/uapi/linux/io_uring.h40
-rw-r--r--init/do_mounts.c30
-rw-r--r--init/do_mounts_initrd.c11
-rw-r--r--init/main.c31
-rw-r--r--ipc/util.c2
-rw-r--r--kernel/bpf/cgroup.c2
-rw-r--r--kernel/bpf/local_storage.c4
-rw-r--r--kernel/module.c6
-rw-r--r--kernel/trace/fgraph.c9
-rw-r--r--kernel/trace/ftrace.c19
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events_inject.c2
-rw-r--r--kernel/workqueue.c4
-rw-r--r--lib/raid6/unroll.awk2
-rw-r--r--net/802/mrp.c6
-rw-r--r--net/batman-adv/main.c2
-rw-r--r--net/bpf/test_run.c8
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/filter.c140
-rw-r--r--net/core/flow_dissector.c10
-rw-r--r--net/core/xdp.c4
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_vti.c4
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_cthelper.c2
-rw-r--r--net/netfilter/nft_ct.c12
-rw-r--r--net/netfilter/nft_masq.c2
-rw-r--r--net/netfilter/nft_nat.c6
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_tproxy.c4
-rw-r--r--net/netfilter/xt_RATEEST.c2
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/datapath.c2
-rw-r--r--net/openvswitch/flow.h4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/sched/act_ct.c4
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/socket.c4
-rw-r--r--net/unix/af_unix.c2
-rwxr-xr-xscripts/checkpatch.pl9
-rw-r--r--security/integrity/ima/ima_policy.c4
-rw-r--r--sound/firewire/fireface/ff-pcm.c2
-rw-r--r--sound/firewire/motu/motu-pcm.c8
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c2
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c20
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_realtek.c8
-rw-r--r--sound/soc/codecs/hdmi-codec.c2
428 files changed, 3141 insertions, 1847 deletions
diff --git a/.mailmap b/.mailmap
index c24773db04a7..00581c1f0983 100644
--- a/.mailmap
+++ b/.mailmap
@@ -276,3 +276,5 @@ Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index 594095b54b29..c00f9f11e3f3 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -144,7 +144,7 @@ journal_crypt:algorithm(:key) (the key is optional)
Encrypt the journal using given algorithm to make sure that the
attacker can't read the journal. You can use a block cipher here
(such as "cbc(aes)") or a stream cipher (for example "chacha20",
- "salsa20", "ctr(aes)" or "ecb(arc4)").
+ "salsa20" or "ctr(aes)").
The journal contains history of last writes to the block device,
an attacker reading the journal could see the last sector nubmers
diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst
index 4872fb6d2952..ec62fcc8eece 100644
--- a/Documentation/admin-guide/device-mapper/index.rst
+++ b/Documentation/admin-guide/device-mapper/index.rst
@@ -8,6 +8,7 @@ Device Mapper
cache-policies
cache
delay
+ dm-clone
dm-crypt
dm-dust
dm-flakey
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 8a1e38a1d7ab..cffe8bb0bad1 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -8,7 +8,7 @@ title: Allwinner platforms device tree bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
index d2a872286437..f0b3d30fbb76 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
@@ -8,7 +8,7 @@ title: Allwinner A64 Display Engine Bus Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
index be32f087c529..9fe11ceecdba 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
@@ -8,7 +8,7 @@ title: Allwinner A23 RSB Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells":
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index 64938fdaea55..4d382128b711 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -8,7 +8,7 @@ title: Allwinner Clock Control Unit Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#clock-cells":
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
index 80b3e7350a73..33c7842917f6 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Security System Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index dafc0980c4fa..0f7074977c04 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -8,7 +8,7 @@ title: Allwinner A31 MIPI-DSI Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells": true
diff --git a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
index 0e7987f1cdb7..d67617f6f74a 100644
--- a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ronbo RB070D30 DSI Display Panel
maintainers:
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
index 15abc0f9429f..83808199657b 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 DMA Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
index 387d599522c7..9e53472be194 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
@@ -8,7 +8,7 @@ title: Allwinner A64 DMA Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml
index 740b7f9b535b..c1676b96daac 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml
@@ -8,7 +8,7 @@ title: Allwinner A31 DMA Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml b/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
index 9346ef6ba61b..6097e8ac46c1 100644
--- a/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
+++ b/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
@@ -8,7 +8,7 @@ title: Allwinner A31 P2WI (Push/Pull 2 Wires Interface) Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
index b68be3aaf587..e1f6d64bdccd 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/iio/adc/adi,ad7292.yaml#
@@ -53,7 +53,8 @@ patternProperties:
description: |
The channel number. It can have up to 8 channels numbered from 0 to 7.
items:
- maximum: 7
+ - minimum: 0
+ maximum: 7
diff-channels:
description: see Documentation/devicetree/bindings/iio/adc/adc.txt
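
For reference, the change above tightens the per-channel constraint so that a channel's reg must fall in the 0-7 range. A minimal node conforming to the updated schema might look like the sketch below; properties other than the channel reg values (vref-supply, spi-max-frequency, the node names) are illustrative assumptions and are not taken from this patch:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        adc@0 {
            compatible = "adi,ad7292";
            reg = <0>;                      /* SPI chip select */
            vref-supply = <&adc_vref>;      /* assumed external reference */
            spi-max-frequency = <25000000>;
            #address-cells = <1>;
            #size-cells = <0>;

            channel@0 {
                reg = <0>;                  /* valid: within 0..7 */
            };

            channel@7 {
                reg = <7>;                  /* highest channel the schema now allows */
            };
        };
    };
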
diff --git a/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml b/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml
index d74962c0f5ae..15c514b83583 100644
--- a/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml
@@ -8,7 +8,7 @@ title: Allwinner A33 Thermal Sensor Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#io-channel-cells":
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index b3bd8ef7fbd6..5b3b71c9c018 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 LRADC Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml
index 23a202d24e43..953d875b5e74 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Interrupt Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: /schemas/interrupt-controller.yaml#
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index 8cd08cfb25be..cf09055da78b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -8,7 +8,7 @@ title: Allwinner A20 Non-Maskable Interrupt Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: /schemas/interrupt-controller.yaml#
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
index d3e423fcb6c2..0f6374ceaa69 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
description: |-
The Allwinner A10 and later has a CMOS Sensor Interface to retrieve
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
index dea36d68cdbe..7838804700d6 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Infrared Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "rc.yaml#"
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
index 30d9fb193d7f..22a94b6fdbde 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
@@ -60,7 +60,8 @@ patternProperties:
maximum: 1066000000
nvidia,emem-configuration:
- $ref: /schemas/types.yaml#/definitions/uint32-array
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Values to be written to the EMEM register block. See section
"15.6.1 MC Registers" in the TRM.
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
index 7fe0ca14e324..e4135bac6957 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
@@ -56,7 +56,8 @@ patternProperties:
maximum: 900000000
nvidia,emc-auto-cal-interval:
- $ref: /schemas/types.yaml#/definitions/uint32
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
description:
Pad calibration interval in microseconds.
minimum: 0
@@ -78,7 +79,8 @@ patternProperties:
Mode Register 0.
nvidia,emc-zcal-cnt-long:
- $ref: /schemas/types.yaml#/definitions/uint32
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
description:
Number of EMC clocks to wait before issuing any commands after
sending ZCAL_MRW_CMD.
@@ -96,7 +98,8 @@ patternProperties:
FBIO "read" FIFO periodic resetting enabled.
nvidia,emc-configuration:
- $ref: /schemas/types.yaml#/definitions/uint32-array
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
description:
EMC timing characterization data. These are the registers
(see section "18.13.2 EMC Registers" in the TRM) whose values
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
index 84fd57bcf0dc..4b9196c83291 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
@@ -77,7 +77,8 @@ patternProperties:
maximum: 900000000
nvidia,emem-configuration:
- $ref: /schemas/types.yaml#/definitions/uint32-array
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Values to be written to the EMEM register block. See section
"18.13.1 MC Registers" in the TRM.
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
index 4b1a09acb98b..39afacc447b2 100644
--- a/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Resistive Touchscreen Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#thermal-sensor-cells":
diff --git a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
index 64bca41031d5..e82c9a07b6fb 100644
--- a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells": true
diff --git a/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
index b5b3cf5b1ac2..5d3fa412aabd 100644
--- a/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells": true
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
index ae4796ec50a0..8d8560a67abf 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
index e5562c525ed9..767193ec1d32 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 MDIO Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "mdio.yaml#"
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
index f683b7104e3e..703d0d886884 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 11654d4b80fb..db36b4d86484 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -8,7 +8,7 @@ title: Allwinner A83t EMAC Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
index 770af7c46114..a95960ee3feb 100644
--- a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
+++ b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 CAN Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index 81ae8cafabc1..ac8c76369a86 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/ti,cpsw-switch.yaml#
@@ -44,7 +44,6 @@ properties:
description: CPSW functional clock
clock-names:
- maxItems: 1
items:
- const: fck
@@ -70,7 +69,6 @@ properties:
Phandle to the system control device node which provides access to
efuse IO range with MAC addresses
-
ethernet-ports:
type: object
properties:
@@ -82,8 +80,6 @@ properties:
patternProperties:
"^port@[0-9]+$":
type: object
- minItems: 1
- maxItems: 2
description: CPSW external ports
allOf:
@@ -91,23 +87,20 @@ properties:
properties:
reg:
- maxItems: 1
- enum: [1, 2]
+ items:
+ - enum: [1, 2]
description: CPSW port number
phys:
- $ref: /schemas/types.yaml#definitions/phandle-array
maxItems: 1
description: phandle on phy-gmii-sel PHY
label:
- $ref: /schemas/types.yaml#/definitions/string-array
- maxItems: 1
description: label associated with this port
ti,dual-emac-pvid:
- $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 1024
description:
@@ -136,7 +129,6 @@ properties:
description: CPTS reference clock
clock-names:
- maxItems: 1
items:
- const: cpts
@@ -201,7 +193,7 @@ examples:
phys = <&phy_gmii_sel 1>;
phy-handle = <&ethphy0_sw>;
phy-mode = "rgmii";
- ti,dual_emac_pvid = <1>;
+ ti,dual-emac-pvid = <1>;
};
cpsw_port2: port@2 {
@@ -211,7 +203,7 @@ examples:
phys = <&phy_gmii_sel 2>;
phy-handle = <&ethphy1_sw>;
phy-mode = "rgmii";
- ti,dual_emac_pvid = <2>;
+ ti,dual-emac-pvid = <2>;
};
};
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index 659b02002a35..daf1321d76ad 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Security ID Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "nvmem.yaml#"
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
index fa46670de299..230d74f22136 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -8,7 +8,7 @@ title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#phy-cells":
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
new file mode 100644
index 000000000000..7917a95cda78
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright 2019 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/marvell,mmp3-hsic-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Marvell MMP3 HSIC PHY
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+ compatible:
+ const: marvell,mmp3-hsic-phy
+
+ reg:
+ maxItems: 1
+ description: base address of the device
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to reset
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - "#phy-cells"
+
+examples:
+ - |
+ hsic-phy@f0001800 {
+ compatible = "marvell,mmp3-hsic-phy";
+ reg = <0xf0001800 0x40>;
+ reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
+ #phy-cells = <0>;
+ };
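
Since #phy-cells is 0, a consumer references this PHY with a bare phandle. The controller node below is a hypothetical sketch only — its name, compatible string, register range and unit address are assumptions, not part of this binding (the real consumer nodes are added to mmp3.dtsi elsewhere in this merge):

    hsic_phy0: hsic-phy@f0001800 {
        compatible = "marvell,mmp3-hsic-phy";
        reg = <0xf0001800 0x40>;
        reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
        #phy-cells = <0>;
    };

    usb@f0001100 {
        compatible = "marvell,pxau2o-ehci";   /* assumed HSIC controller compatible */
        reg = <0xf0001100 0x200>;
        phys = <&hsic_phy0>;
        phy-names = "usb";
        status = "okay";
    };
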
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index cd0503b6fe36..bfefd09d8c1e 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Pin Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#gpio-cells":
diff --git a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
index 0ac52f83a58c..4a21fe77ee1d 100644
--- a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 PWM Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#pwm-cells":
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index acf18d170352..c0d83865e933 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -50,6 +50,8 @@ properties:
description: Should contain the WWDG1 watchdog reset interrupt
maxItems: 1
+ wakeup-source: true
+
mboxes:
description:
This property is required only if the rpmsg/virtio functionality is used.
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml
index 46d69c32b89b..478b0234e8fa 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index d7a57ec4a640..37c2a601c3fa 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -8,7 +8,7 @@ title: Allwinner A31 RTC Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#clock-cells":
diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml b/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
index ee9712f1c97d..2ecab8ed702a 100644
--- a/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
+++ b/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 PS2 Host Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
description:
A20 PS2 is dual role controller (PS2 host and PS2 device). These
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index b8f89c7258eb..ea1d2efb2aaa 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Codec Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#sound-dai-cells":
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
index eb3992138eec..112ae00d63c1 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 I2S Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#sound-dai-cells":
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
index 38d4cede0860..444a432912bb 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
@@ -10,7 +10,7 @@ maintainers:
- Chen-Yu Tsai <wens@csie.org>
- Liam Girdwood <lgirdwood@gmail.com>
- Mark Brown <broonie@kernel.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#sound-dai-cells":
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
index f290eb72a878..3b764415c9ab 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
@@ -8,7 +8,7 @@ title: Allwinner A64 Analog Codec Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
index 85305b4c2729..9718358826ab 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
@@ -8,7 +8,7 @@ title: Allwinner A23 Analog Codec Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
index 5e7cc05bbff1..55d28268d2f4 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
@@ -8,7 +8,7 @@ title: Allwinner A33 Codec Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#sound-dai-cells":
diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml
index 6d1329c28170..8036499112f5 100644
--- a/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells": true
diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
index f36c46d236d7..0565dc49e449 100644
--- a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
"#address-cells": true
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
index 20adc1c8e9cc..23e989e09766 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 Timer Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
index dfa0c41fd261..40fc4bcb3145 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
@@ -8,7 +8,7 @@ title: Allwinner A13 High-Speed Timer Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
index 0af70fc8de5a..d9207bf9d894 100644
--- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
@@ -8,7 +8,7 @@ title: Allwinner A10 mUSB OTG Controller Device Tree Bindings
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
index 3a54f58683a0..e8f226376108 100644
--- a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
@@ -11,7 +11,7 @@ allOf:
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- - Maxime Ripard <maxime.ripard@bootlin.com>
+ - Maxime Ripard <mripard@kernel.org>
properties:
compatible:
diff --git a/Documentation/filesystems/erofs.txt b/Documentation/filesystems/erofs.txt
index b0c085326e2e..db6d39c3ae71 100644
--- a/Documentation/filesystems/erofs.txt
+++ b/Documentation/filesystems/erofs.txt
@@ -24,11 +24,11 @@ Here is the main features of EROFS:
- Metadata & data could be mixed by design;
- 2 inode versions for different requirements:
- v1 v2
+ compact (v1) extended (v2)
Inode metadata size: 32 bytes 64 bytes
Max file size: 4 GB 16 EB (also limited by max. vol size)
Max uids/gids: 65536 4294967296
- File creation time: no yes (64 + 32-bit timestamp)
+ File change time: no yes (64 + 32-bit timestamp)
Max hardlinks: 65536 4294967296
Metadata reserved: 4 bytes 14 bytes
@@ -39,7 +39,7 @@ Here is the main features of EROFS:
- Support POSIX.1e ACLs by using xattrs;
- Support transparent file compression as an option:
- LZ4 algorithm with 4 KB fixed-output compression for high performance;
+ LZ4 algorithm with 4 KB fixed-sized output compression for high performance.
The following git tree provides the file system user-space tools under
development (ex, formatting tool mkfs.erofs):
@@ -85,7 +85,7 @@ All data areas should be aligned with the block size, but metadata areas
may not. All metadatas can be now observed in two different spaces (views):
1. Inode metadata space
Each valid inode should be aligned with an inode slot, which is a fixed
- value (32 bytes) and designed to be kept in line with v1 inode size.
+ value (32 bytes) and designed to be kept in line with compact inode size.
Each inode can be directly found with the following formula:
inode offset = meta_blkaddr * block_size + 32 * nid
@@ -117,10 +117,10 @@ may not. All metadatas can be now observed in two different spaces (views):
|-> aligned with 4B
Inode could be 32 or 64 bytes, which can be distinguished from a common
- field which all inode versions have -- i_advise:
+ field which all inode versions have -- i_format:
__________________ __________________
- | i_advise | | i_advise |
+ | i_format | | i_format |
|__________________| |__________________|
| ... | | ... |
| | | |
@@ -129,12 +129,13 @@ may not. All metadatas can be now observed in two different spaces (views):
|__________________| 64 bytes
Xattrs, extents, data inline are followed by the corresponding inode with
- proper alignes, and they could be optional for different data mappings,
- _currently_ there are totally 3 valid data mappings supported:
+ proper alignment, and they could be optional for different data mappings.
+ _currently_ total 4 valid data mappings are supported:
- 1) flat file data without data inline (no extent);
- 2) fixed-output size data compression (must have extents);
- 3) flat file data with tail-end data inline (no extent);
+ 0 flat file data without data inline (no extent);
+ 1 fixed-sized output data compression (with non-compacted indexes);
+ 2 flat file data with tail packing data inline (no extent);
+ 3 fixed-sized output data compression (with compacted indexes, v5.3+).
The size of the optional xattrs is indicated by i_xattr_count in inode
header. Large xattrs or xattrs shared by many different files can be
@@ -182,8 +183,8 @@ introduce another on-disk field at all.
Compression
-----------
-Currently, EROFS supports 4KB fixed-output clustersize transparent file
-compression, as illustrated below:
+Currently, EROFS supports 4KB fixed-sized output transparent file compression,
+as illustrated below:
|---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
clusterofs clusterofs clusterofs
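Note on the inode lookup formula quoted earlier in this erofs.txt hunk: a compact inode occupies one 32-byte slot, so an inode's on-disk position follows directly from its nid. Below is a small, self-contained C sketch of that arithmetic; the meta_blkaddr, block size and nid values are hypothetical, chosen only for illustration and not taken from any real image.

/* inode offset = meta_blkaddr * block_size + 32 * nid (see hunk above) */
#include <stdio.h>
#include <stdint.h>

static uint64_t erofs_inode_offset(uint64_t meta_blkaddr, uint32_t block_size,
                                   uint64_t nid)
{
        return meta_blkaddr * block_size + 32 * nid;
}

int main(void)
{
        /* e.g. metadata starting at block 2 of a 4 KiB-block image, inode nid 5 */
        printf("%llu\n",
               (unsigned long long)erofs_inode_offset(2, 4096, 5)); /* 8352 */
        return 0;
}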
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.rst
index 845d689e0fd7..e443be7928db 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Written by: Neil Brown
Please see MAINTAINERS file for where to send questions.
@@ -181,7 +183,7 @@ Kernel config options:
worried about backward compatibility with kernels that have the redirect_dir
feature and follow redirects even if turned off.
-Module options (can also be changed through /sys/module/overlay/parameters/*):
+Module options (can also be changed through /sys/module/overlay/parameters/):
- "redirect_dir=BOOL":
See OVERLAY_FS_REDIRECT_DIR kernel config option above.
@@ -263,7 +265,7 @@ top, lower2 the middle and lower3 the bottom layer.
Metadata only copy up
---------------------
+---------------------
When metadata only copy up feature is enabled, overlayfs will only copy
up metadata (as opposed to whole file), when a metadata specific operation
@@ -286,10 +288,10 @@ pointed by REDIRECT. This should not be possible on local system as setting
"trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
for untrusted layers like from a pen drive.
-Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and
+Note: redirect_dir={off|nofollow|follow[*]} conflicts with metacopy=on, and
results in an error.
-(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
+[*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
given.
Sharing and copying layers
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index ada573b7d703..edb296c52f61 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -988,7 +988,7 @@ Similarly, if you need to calculate the size of some structure member, use
.. code-block:: c
- #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+ #define sizeof_field(t, f) (sizeof(((t*)0)->f))
There are also min() and max() macros that do strict type checking if you
need them. Feel free to peruse that header file to see what else is already
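The coding-style hunk above only renames the example macro from FIELD_SIZEOF() to sizeof_field(); the expansion itself is unchanged. A minimal userspace sketch of what the macro yields, using a made-up struct (the sizes in the comments assume a typical LP64 target):

#include <stdio.h>

#define sizeof_field(t, f) (sizeof(((t *)0)->f))

struct example {
        char tag[8];
        int  count;
        long total;
};

int main(void)
{
        printf("tag:   %zu\n", sizeof_field(struct example, tag));   /* 8 */
        printf("count: %zu\n", sizeof_field(struct example, count)); /* typically 4 */
        printf("total: %zu\n", sizeof_field(struct example, total)); /* typically 8 */
        return 0;
}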
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
index 201f80c7c050..df129f55ace5 100644
--- a/Documentation/scsi/smartpqi.txt
+++ b/Documentation/scsi/smartpqi.txt
@@ -29,7 +29,7 @@ smartpqi specific entries in /sys
smartpqi host attributes:
-------------------------
/sys/class/scsi_host/host*/rescan
- /sys/class/scsi_host/host*/version
+ /sys/class/scsi_host/host*/driver_version
The host rescan attribute is a write only attribute. Writing to this
attribute will trigger the driver to scan for new, changed, or removed
diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst
index 8995d2d19f20..8725f2b9e960 100644
--- a/Documentation/translations/it_IT/process/coding-style.rst
+++ b/Documentation/translations/it_IT/process/coding-style.rst
@@ -1005,7 +1005,7 @@ struttura, usate
.. code-block:: c
- #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+ #define sizeof_field(t, f) (sizeof(((t*)0)->f))
Ci sono anche le macro min() e max() che, se vi serve, effettuano un controllo
rigido sui tipi. Sentitevi liberi di leggere attentamente questo file
diff --git a/Documentation/translations/zh_CN/process/coding-style.rst b/Documentation/translations/zh_CN/process/coding-style.rst
index 4f6237392e65..eae10bc7f86f 100644
--- a/Documentation/translations/zh_CN/process/coding-style.rst
+++ b/Documentation/translations/zh_CN/process/coding-style.rst
@@ -826,7 +826,7 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
.. code-block:: c
- #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+ #define sizeof_field(t, f) (sizeof(((t*)0)->f))
还有可以做严格的类型检查的 min() 和 max() 宏,如果你需要可以使用它们。你可以
自己看看那个头文件里还定义了什么你可以拿来用的东西,如果有定义的话,你就不应
diff --git a/MAINTAINERS b/MAINTAINERS
index e6db3889cb19..b8403e2d2ef2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4971,6 +4971,7 @@ F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
+K: dma_(buf|fence|resv)
T: git git://anongit.freedesktop.org/drm/drm-misc
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
@@ -12394,7 +12395,7 @@ L: linux-unionfs@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
S: Supported
F: fs/overlayfs/
-F: Documentation/filesystems/overlayfs.txt
+F: Documentation/filesystems/overlayfs.rst
P54 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
@@ -16315,12 +16316,10 @@ F: drivers/media/radio/radio-raremono.c
THERMAL
M: Zhang Rui <rui.zhang@intel.com>
-M: Eduardo Valentin <edubezval@gmail.com>
-R: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
R: Amit Kucheria <amit.kucheria@verdurent.com>
L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
Q: https://patchwork.kernel.org/project/linux-pm/list/
S: Supported
F: drivers/thermal/
diff --git a/Makefile b/Makefile
index 73e3c2802927..f900c23b8291 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index dc05a63516f5..27ea64b1fa33 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -42,10 +42,10 @@ do { \
#define EXTRA_INFO(f) { \
BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
- % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ % sizeof_field(struct unwind_frame_info, f)) \
+ offsetof(struct unwind_frame_info, f) \
- / FIELD_SIZEOF(struct unwind_frame_info, f), \
- FIELD_SIZEOF(struct unwind_frame_info, f) \
+ / sizeof_field(struct unwind_frame_info, f), \
+ sizeof_field(struct unwind_frame_info, f) \
}
#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
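The EXTRA_INFO() change above keeps the same compile-time sanity check, just spelled with sizeof_field(): the build fails if a frame member's offset is not a multiple of its size. A hedged userspace sketch of that pattern follows, with BUILD_BUG_ON_ZERO() spelled out locally (mirroring the kernel's build_bug.h definition) and a hypothetical struct frame standing in for unwind_frame_info:

#include <stddef.h>
#include <stdio.h>

#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
#define sizeof_field(t, f)   (sizeof(((t *)0)->f))

struct frame {
        unsigned long pc;
        unsigned long sp;
};

int main(void)
{
        /* 0 when the check passes; a misaligned field would instead break the
         * build with a "negative width in bit-field" error. */
        int ok = BUILD_BUG_ON_ZERO(offsetof(struct frame, sp) %
                                   sizeof_field(struct frame, sp));

        printf("sp: offset %zu, size %zu, check %d\n",
               offsetof(struct frame, sp),
               sizeof_field(struct frame, sp), ok);
        return 0;
}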
diff --git a/arch/arm/boot/dts/mmp3-dell-ariel.dts b/arch/arm/boot/dts/mmp3-dell-ariel.dts
index c1947b5a688d..15449c72c042 100644
--- a/arch/arm/boot/dts/mmp3-dell-ariel.dts
+++ b/arch/arm/boot/dts/mmp3-dell-ariel.dts
@@ -49,6 +49,28 @@
status = "okay";
};
+&hsic0 {
+ status = "okay";
+
+ usb1@1 {
+ compatible = "usb424,2640";
+ reg = <0x01>;
+ #address-cells = <0x01>;
+ #size-cells = <0x00>;
+
+ mass-storage@1 {
+ compatible = "usb424,4040";
+ reg = <0x01>;
+ status = "disabled";
+ };
+ };
+};
+
+&hsic_phy0 {
+ status = "okay";
+ reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
+};
+
&mmc3 {
status = "okay";
max-frequency = <50000000>;
diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
index d9762de0ed34..36c50706e60e 100644
--- a/arch/arm/boot/dts/mmp3.dtsi
+++ b/arch/arm/boot/dts/mmp3.dtsi
@@ -201,6 +201,50 @@
status = "disabled";
};
+ hsic_phy0: hsic-phy@f0001800 {
+ compatible = "marvell,mmp3-hsic-phy",
+ "usb-nop-xceiv",
+ reg = <0xf0001800 0x40>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ hsic0: hsic@f0001000 {
+ compatible = "marvell,pxau2o-ehci";
+ reg = <0xf0001000 0x200>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&soc_clocks MMP2_CLK_USBHSIC0>;
+ clock-names = "USBCLK";
+ phys = <&hsic_phy0>;
+ phy-names = "usb";
+ phy_type = "hsic";
+ #address-cells = <0x01>;
+ #size-cells = <0x00>;
+ status = "disabled";
+ };
+
+ hsic_phy1: hsic-phy@f0002800 {
+ compatible = "marvell,mmp3-hsic-phy",
+ "usb-nop-xceiv",
+ reg = <0xf0002800 0x40>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ hsic1: hsic@f0002000 {
+ compatible = "marvell,pxau2o-ehci";
+ reg = <0xf0002000 0x200>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&soc_clocks MMP2_CLK_USBHSIC1>;
+ clock-names = "USBCLK";
+ phys = <&hsic_phy1>;
+ phy-names = "usb";
+ phy_type = "hsic";
+ #address-cells = <0x01>;
+ #size-cells = <0x00>;
+ status = "disabled";
+ };
+
mmc1: mmc@d4280000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xd4280000 0x120>;
diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c
index f3f42cf3b893..776ae07e0469 100644
--- a/arch/arm/crypto/curve25519-glue.c
+++ b/arch/arm/crypto/curve25519-glue.c
@@ -38,6 +38,13 @@ void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
}
EXPORT_SYMBOL(curve25519_arch);
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE])
+{
+ return curve25519_arch(pub, secret, curve25519_base_point);
+}
+EXPORT_SYMBOL(curve25519_base_arch);
+
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index ba8f82a29a81..e794b2d53adf 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -45,13 +45,6 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
/* See header file for descriptions of functions */
/**
- * This macro returns the size of a member of a structure.
- * Logically it is the same as "sizeof(s::field)" in C++, but
- * C lacks the "::" operator.
- */
-#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
-
-/**
* This macro returns a member of the
* cvmx_bootmem_named_block_desc_t structure. These members can't
* be directly addressed as they might be in memory not directly
@@ -65,7 +58,7 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
__cvmx_bootmem_desc_get(addr, \
offsetof(struct cvmx_bootmem_named_block_desc, field), \
- SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+ sizeof_field(struct cvmx_bootmem_named_block_desc, field))
/**
* This function is the implementation of the get macros defined
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
index b56af759dcdf..819bdfcc2e71 100644
--- a/arch/nios2/mm/ioremap.c
+++ b/arch/nios2/mm/ioremap.c
@@ -138,6 +138,14 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
return NULL;
}
+ /*
+ * Map uncached objects in the low part of address space to
+ * CONFIG_NIOS2_IO_REGION_BASE
+ */
+ if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
+ IS_MAPPABLE_UNCACHEABLE(last_addr))
+ return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
+
/* Mappings have to be page-aligned */
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index 6e5a2a4faeab..4ec2a9f14f84 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -97,12 +97,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC64
#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
+ do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \
PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
} while (0)
#else
#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4); \
+ do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \
PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \
} while(0)
#endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d57b46e0dd60..0acc9d5fb19e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -321,7 +321,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
break;
case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
break;
case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
@@ -333,16 +333,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
/*** Ancillary info loads ***/
case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ BUILD_BUG_ON(sizeof_field(struct sk_buff,
protocol) != 2);
PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
protocol));
break;
case BPF_ANC | SKF_AD_IFINDEX:
case BPF_ANC | SKF_AD_HATYPE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ BUILD_BUG_ON(sizeof_field(struct net_device,
ifindex) != 4);
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ BUILD_BUG_ON(sizeof_field(struct net_device,
type) != 2);
PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
dev));
@@ -365,17 +365,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
break;
case BPF_ANC | SKF_AD_MARK:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
mark));
break;
case BPF_ANC | SKF_AD_RXHASH:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
hash));
break;
case BPF_ANC | SKF_AD_VLAN_TAG:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
@@ -388,7 +388,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_ANDI(r_A, r_A, 1);
break;
case BPF_ANC | SKF_AD_QUEUE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ BUILD_BUG_ON(sizeof_field(struct sk_buff,
queue_mapping) != 2);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
queue_mapping));
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 634759ac8c71..d325b67d00df 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -2,8 +2,8 @@ menu "SoC selection"
config SOC_SIFIVE
bool "SiFive SoCs"
- select SERIAL_SIFIVE
- select SERIAL_SIFIVE_CONSOLE
+ select SERIAL_SIFIVE if TTY
+ select SERIAL_SIFIVE_CONSOLE if TTY
select CLK_SIFIVE
select CLK_SIFIVE_FU540_PRCI
select SIFIVE_PLIC
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index a474f98ce4fa..36db8145f9f4 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -24,7 +24,7 @@ $(obj)/Image: vmlinux FORCE
$(obj)/Image.gz: $(obj)/Image FORCE
$(call if_changed,gzip)
-loader.o: $(src)/loader.S $(obj)/Image
+$(obj)/loader.o: $(src)/loader.S $(obj)/Image
$(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE
$(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d4051e88e625..bc88841d335d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -124,6 +124,7 @@ config S390
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
+ select HAVE_ARCH_KASAN_VMALLOC
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 6dc6c4fbc8e2..69289e99cabd 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -27,7 +27,6 @@
#define MACHINE_FLAG_DIAG9C BIT(3)
#define MACHINE_FLAG_ESOP BIT(4)
#define MACHINE_FLAG_IDTE BIT(5)
-#define MACHINE_FLAG_DIAG44 BIT(6)
#define MACHINE_FLAG_EDAT1 BIT(7)
#define MACHINE_FLAG_EDAT2 BIT(8)
#define MACHINE_FLAG_TOPOLOGY BIT(10)
@@ -94,7 +93,6 @@ extern unsigned long __swsusp_reset_dma;
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index ef3c00b049ab..4093a2856929 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -86,7 +86,7 @@ static inline int share(unsigned long addr, u16 cmd)
};
if (!is_prot_virt_guest())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* Sharing is page wise, if we encounter addresses that are
* not page aligned, we assume something went wrong. If
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index db32a55daaec..cd241ee66eff 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -204,21 +204,6 @@ static __init void detect_diag9c(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
-static __init void detect_diag44(void)
-{
- int rc;
-
- diag_stat_inc(DIAG_STAT_X044);
- asm volatile(
- " diag 0,0,0x44\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-}
-
static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
@@ -331,7 +316,6 @@ void __init startup_init(void)
setup_arch_string();
setup_boot_command_line();
detect_diag9c();
- detect_diag44();
detect_machine_facilities();
save_vector_registers();
setup_topology();
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c07fdcd73726..77d93c534284 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1303,18 +1303,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
if (flush_all && done)
break;
-
- /* If an event overflow happened, discard samples by
- * processing any remaining sample-data-blocks.
- */
- if (event_overflow)
- flush_all = 1;
}
/* Account sample overflows in the event hardware structure */
if (sampl_overflow)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
+
+ /* Perf_event_overflow() and perf_event_account_interrupt() limit
+ * the interrupt rate to an upper limit. Roughly 1000 samples per
+ * task tick.
+ * Hitting this limit results in a large number
+ * of throttled REF_REPORT_THROTTLE entries and the samples
+ * are dropped.
+ * Slightly increase the interval to avoid hitting this limit.
+ */
+ if (event_overflow) {
+ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+ __func__,
+ DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+ }
+
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "%s: "
"overflows: sample %llu event %llu"
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2794cad9312e..a08bd2522dd9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -413,14 +413,11 @@ EXPORT_SYMBOL(arch_vcpu_is_preempted);
void smp_yield_cpu(int cpu)
{
- if (MACHINE_HAS_DIAG9C) {
- diag_stat_inc_norecursion(DIAG_STAT_X09C);
- asm volatile("diag %0,0,0x9c"
- : : "d" (pcpu_devices[cpu].address));
- } else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
- diag_stat_inc_norecursion(DIAG_STAT_X044);
- asm volatile("diag 0,0,0x44");
- }
+ if (!MACHINE_HAS_DIAG9C)
+ return;
+ diag_stat_inc_norecursion(DIAG_STAT_X09C);
+ asm volatile("diag %0,0,0x9c"
+ : : "d" (pcpu_devices[cpu].address));
}
/*
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ce1e4bbe53aa..9b2dab5a69f9 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -242,7 +242,6 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
- /* Use classic spinlocks + niai if the steal time is >= 10% */
if (test_cpu_flag(CIF_DEDICATED_CPU))
arch_spin_lock_queued(lp);
else
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index bda7ac0ddd29..32b7a30b2485 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -238,7 +238,7 @@ static int test_unwind_irq(struct unwindme *u)
{
preempt_disable();
if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
- pr_info("Couldn't reqister external interrupt handler");
+ pr_info("Couldn't register external interrupt handler");
return -1;
}
u->task = current;
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 460f25572940..06345616a646 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
enum populate_mode {
POPULATE_ONE2ONE,
POPULATE_MAP,
- POPULATE_ZERO_SHADOW
+ POPULATE_ZERO_SHADOW,
+ POPULATE_SHALLOW
};
static void __init kasan_early_vmemmap_populate(unsigned long address,
unsigned long end,
@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pgd_populate(&init_mm, pg_dir, p4_dir);
}
+ if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+ mode == POPULATE_SHALLOW) {
+ address = (address + P4D_SIZE) & P4D_MASK;
+ continue;
+ }
+
p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
p4d_populate(&init_mm, p4_dir, pu_dir);
}
+ if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+ mode == POPULATE_SHALLOW) {
+ address = (address + PUD_SIZE) & PUD_MASK;
+ continue;
+ }
+
pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
page = kasan_early_shadow_page;
pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
break;
+ case POPULATE_SHALLOW:
+ /* should never happen */
+ break;
}
}
address += PAGE_SIZE;
@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
init_mm.pgd = early_pg_dir;
/*
* Current memory layout:
- * +- 0 -------------+ +- shadow start -+
- * | 1:1 ram mapping | /| 1/8 ram |
- * +- end of ram ----+ / +----------------+
- * | ... gap ... |/ | kasan |
- * +- shadow start --+ | zero |
- * | 1/8 addr space | | page |
- * +- shadow end -+ | mapping |
- * | ... gap ... |\ | (untracked) |
- * +- modules vaddr -+ \ +----------------+
- * | 2Gb | \| unmapped | allocated per module
- * +-----------------+ +- shadow end ---+
+ * +- 0 -------------+ +- shadow start -+
+ * | 1:1 ram mapping | /| 1/8 ram |
+ * | | / | |
+ * +- end of ram ----+ / +----------------+
+ * | ... gap ... | / | |
+ * | |/ | kasan |
+ * +- shadow start --+ | zero |
+ * | 1/8 addr space | | page |
+ * +- shadow end -+ | mapping |
+ * | ... gap ... |\ | (untracked) |
+ * +- vmalloc area -+ \ | |
+ * | vmalloc_size | \ | |
+ * +- modules vaddr -+ \ +----------------+
+ * | 2Gb | \| unmapped | allocated per module
+ * +-----------------+ +- shadow end ---+
+ *
+ * Current memory layout (KASAN_VMALLOC):
+ * +- 0 -------------+ +- shadow start -+
+ * | 1:1 ram mapping | /| 1/8 ram |
+ * | | / | |
+ * +- end of ram ----+ / +----------------+
+ * | ... gap ... | / | kasan |
+ * | |/ | zero |
+ * +- shadow start --+ | page |
+ * | 1/8 addr space | | mapping |
+ * +- shadow end -+ | (untracked) |
+ * | ... gap ... |\ | |
+ * +- vmalloc area -+ \ +- vmalloc area -+
+ * | vmalloc_size | \ |shallow populate|
+ * +- modules vaddr -+ \ +- modules area -+
+ * | 2Gb | \|shallow populate|
+ * +-----------------+ +- shadow end ---+
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN;
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+ /* shallowly populate kasan shadow for vmalloc and modules */
+ kasan_early_vmemmap_populate(__sha(untracked_mem_end),
+ __sha(vmax), POPULATE_SHALLOW);
+ }
+ /* populate kasan shadow for untracked memory */
kasan_early_vmemmap_populate(__sha(max_physmem_end),
__sha(untracked_mem_end),
POPULATE_ZERO_SHADOW);
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
index f6d148451dfc..f3dc3f25b3ff 100644
--- a/arch/sh/drivers/platform_early.c
+++ b/arch/sh/drivers/platform_early.c
@@ -325,9 +325,9 @@ int __init sh_early_platform_driver_probe(char *class_str,
}
/**
- * sh_early_platform_cleanup - clean up early platform code
+ * early_platform_cleanup - clean up early platform code
*/
-static int __init sh_early_platform_cleanup(void)
+void __init early_platform_cleanup(void)
{
struct platform_device *pd, *pd2;
@@ -337,11 +337,4 @@ static int __init sh_early_platform_cleanup(void)
list_del(&pd->dev.devres_head);
memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
}
-
- return 0;
}
-/*
- * This must happen once after all early devices are probed but before probing
- * real platform devices.
- */
-subsys_initcall(sh_early_platform_cleanup);
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 6d61f8cf4c13..0d5f3c9d52f3 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -266,6 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
+ /* fallthrough */
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index 84cc8f7f83e9..c8eabb973b86 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -180,19 +180,19 @@ do { \
#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \
do { unsigned int _off = offsetof(STRUCT, FIELD); \
- BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \
+ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *)); \
*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)
#define emit_load32(BASE, STRUCT, FIELD, DEST) \
do { unsigned int _off = offsetof(STRUCT, FIELD); \
- BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \
+ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32)); \
*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)
#define emit_load16(BASE, STRUCT, FIELD, DEST) \
do { unsigned int _off = offsetof(STRUCT, FIELD); \
- BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \
+ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16)); \
*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)
@@ -202,7 +202,7 @@ do { unsigned int _off = offsetof(STRUCT, FIELD); \
} while (0)
#define emit_load8(BASE, STRUCT, FIELD, DEST) \
-do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \
+do { BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8)); \
__emit_load8(BASE, STRUCT, FIELD, DEST); \
} while (0)
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 319be936c348..fa31470bbf24 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -259,7 +259,7 @@ static void __init setup_xstate_features(void)
xmm_space);
xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
- xstate_sizes[XFEATURE_SSE] = FIELD_SIZEOF(struct fxregs_state,
+ xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
xmm_space);
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 060a361d9d11..024c3053dbba 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -1043,20 +1043,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
return;
/*
- * If the return location is actually pointing directly to
- * the start of a direct trampoline (if we trace the trampoline
- * it will still be offset by MCOUNT_INSN_SIZE), then the
- * return address is actually off by one word, and we
- * need to adjust for that.
- */
- if (ftrace_direct_func_count) {
- if (ftrace_find_direct_func(self_addr + MCOUNT_INSN_SIZE)) {
- self_addr = *parent;
- parent++;
- }
- }
-
- /*
* Protect against fault, even if it shouldn't
* happen. This tool is too much intrusive to
* ignore such a protection.
diff --git a/block/bio.c b/block/bio.c
index 9d54aa37ce6c..a5d75f6bf4c7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
- if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+ if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return false;
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
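The bio.c hunk above adds an explicit headroom test so that bi_size + len can never wrap around an unsigned int. The same overflow guard in isolation, as a small sketch (types and values are illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_would_overflow(unsigned int size, unsigned int len)
{
        return size > UINT_MAX - len; /* true if size + len would wrap */
}

int main(void)
{
        printf("%d\n", add_would_overflow(UINT_MAX - 10, 20)); /* 1: would wrap */
        printf("%d\n", add_would_overflow(100, 20));           /* 0: safe */
        return 0;
}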
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 708dea92dac8..a229b94d5390 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1062,26 +1062,6 @@ err_unlock:
}
/**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue(). Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
- lockdep_assert_held(&q->queue_lock);
-
- /*
- * @q could be exiting and already have destroyed all blkgs as
- * indicated by NULL root_blkg. If so, don't confuse policies.
- */
- if (!q->root_blkg)
- return;
-
- blk_throtl_drain(q);
-}
-
-/**
* blkcg_exit_queue - exit and release blkcg part of request_queue
* @q: request_queue being released
*
diff --git a/block/blk-core.c b/block/blk-core.c
index a1e228752083..e0a094fddee5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
- if (blk_do_io_stat(req)) {
+ if (req->part && blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
@@ -1328,7 +1328,8 @@ void blk_account_io_done(struct request *req, u64 now)
* normal IO on queueing nor completion. Accounting the
* containing request is enough.
*/
- if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+ if (req->part && blk_do_io_stat(req) &&
+ !(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
@@ -1792,9 +1793,9 @@ int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
- FIELD_SIZEOF(struct request, cmd_flags));
+ sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
- FIELD_SIZEOF(struct bio, bi_opf));
+ sizeof_field(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index aded26092268..9dc53cf9b1f1 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -436,10 +436,10 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
sizeof(struct adiantum_request_ctx));
- subreq_size = max(FIELD_SIZEOF(struct adiantum_request_ctx,
+ subreq_size = max(sizeof_field(struct adiantum_request_ctx,
u.hash_desc) +
crypto_shash_descsize(hash),
- FIELD_SIZEOF(struct adiantum_request_ctx,
+ sizeof_field(struct adiantum_request_ctx,
u.streamcipher_req) +
crypto_skcipher_reqsize(streamcipher));
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 808f2b362106..495a2d1e1460 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -347,7 +347,7 @@ static int essiv_aead_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- subreq_size = FIELD_SIZEOF(struct essiv_aead_request_ctx, aead_req) +
+ subreq_size = sizeof_field(struct essiv_aead_request_ctx, aead_req) +
crypto_aead_reqsize(aead);
tctx->ivoffset = offsetof(struct essiv_aead_request_ctx, aead_req) +
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 08bb9f2f2d23..5e4a8860a9c0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1314,9 +1314,19 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
+ /*
+ * Skip devices whose ACPI companions match the device IDs below,
+ * because they require special power management handling incompatible
+ * with the generic ACPI PM domain.
+ */
+ static const struct acpi_device_id special_pm_ids[] = {
+ {"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT3404", }, /* Fan */
+ {}
+ };
struct acpi_device *adev = ACPI_COMPANION(dev);
- if (!adev)
+ if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
return 0;
/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e9bc9fcc7ea5..b2dad43dbf82 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3310,7 +3310,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t parent_offset;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
- size_t num_valid = (buffer_offset - off_start_offset) *
+ size_t num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
struct binder_buffer_object *parent =
binder_validate_ptr(target_proc, t->buffer,
@@ -3384,7 +3384,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->user_data + sg_buf_offset;
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
- num_valid = (buffer_offset - off_start_offset) *
+ num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
ret = binder_fixup_parent(t, thread, bp,
off_start_offset,
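The binder fix above converts a byte span into an element count by dividing by the element size; the old code multiplied, which inflated num_valid. A sketch of the corrected arithmetic with made-up numbers, using uint64_t as a stand-in for binder_size_t (assumed 8 bytes here purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t off_start_offset = 0;
        uint64_t buffer_offset = 64;        /* 64 bytes of offset entries consumed */

        size_t num_valid = (buffer_offset - off_start_offset) / sizeof(uint64_t);

        printf("%zu entries\n", num_valid); /* 8 = 64 / 8 */
        return 0;
}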
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 30d0523014e0..6cdbf1531238 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -359,7 +359,7 @@ static int handle_remove(const char *nodename, struct device *dev)
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
-int devtmpfs_mount(const char *mntdir)
+int devtmpfs_mount(void)
{
int err;
@@ -369,7 +369,7 @@ int devtmpfs_mount(const char *mntdir)
if (!thread)
return 0;
- err = ksys_mount("devtmpfs", mntdir, "devtmpfs", MS_SILENT, NULL);
+ err = do_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -394,7 +394,7 @@ static int devtmpfsd(void *p)
*err = ksys_unshare(CLONE_NEWNS);
if (*err)
goto out;
- *err = ksys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ *err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
if (*err)
goto out;
ksys_chdir("/.."); /* will traverse into overmounted root */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7c532548b0a6..cf6b6b722e5c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1325,10 +1325,14 @@ struct device *platform_find_device_by_driver(struct device *start,
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
+void __weak __init early_platform_cleanup(void) { }
+
int __init platform_bus_init(void)
{
int error;
+ early_platform_cleanup();
+
error = device_register(&platform_bus);
if (error) {
put_device(&platform_bus);
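The platform.c hunk above gives the bus core an empty __weak early_platform_cleanup() and calls it from platform_bus_init(), so the sh implementation (made non-static earlier in this series of hunks) overrides it at link time. A minimal userspace sketch of that weak-symbol hook pattern; the function names below are illustrative, not the kernel's:

#include <stdio.h>

void __attribute__((weak)) early_cleanup_hook(void)
{
        /* default: nothing to clean up */
}

/* A strong definition elsewhere in the program would replace the weak one:
 *
 * void early_cleanup_hook(void)
 * {
 *         puts("arch-specific cleanup");
 * }
 */

int main(void)
{
        early_cleanup_hook();          /* runs whichever definition was linked */
        puts("bus init continues");
        return 0;
}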
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e8c5c54e1d26..d6a6adfd5159 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->domid = domid;
atomic_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete);
+
+ /*
+ * Because freeing back to the cache may be deferred, it is not
+ * safe to unload the module (and hence destroy the cache) until
+ * this has completed. To prevent premature unloading, take an
+ * extra module reference here and release only when the object
+ * has been freed back to the cache.
+ */
+ __module_get(THIS_MODULE);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
return blkif;
@@ -320,6 +329,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
+ module_put(THIS_MODULE);
}
int __init xen_blkif_interface_init(void)
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index a60a1be937ad..05834953e1fd 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,6 +53,8 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
+#define APMU_USBHSIC0 0xf8
+#define APMU_USBHSIC1 0xfc
#define MPMU_UART_PLL 0x14
struct mmp2_clk_unit {
@@ -194,6 +196,8 @@ static struct mmp_clk_mix_config sdh_mix_config = {
};
static DEFINE_SPINLOCK(usb_lock);
+static DEFINE_SPINLOCK(usbhsic0_lock);
+static DEFINE_SPINLOCK(usbhsic1_lock);
static DEFINE_SPINLOCK(disp0_lock);
static DEFINE_SPINLOCK(disp1_lock);
@@ -224,6 +228,8 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+ {MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
+ {MMP2_CLK_USBHSIC1, "usbhsic1_clk", "usb_pll", 0, APMU_USBHSIC1, 0x1b, 0x1b, 0x0, 0, &usbhsic1_lock},
/* The gate clocks has mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0005be5ea2b4..33d19c8eb027 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -381,7 +381,8 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
if (dev->states_usage[i].disable)
continue;
- limit_ns = (u64)drv->states[i].target_residency_ns;
+ limit_ns = drv->states[i].target_residency_ns;
+ break;
}
dev->poll_limit_ns = limit_ns;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index c76423aaef4d..ce6a5f80fb83 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -403,6 +403,13 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
mutex_lock(&cpuidle_lock);
+ spin_lock(&cpuidle_driver_lock);
+
+ if (!drv->cpumask) {
+ drv->states[idx].flags |= CPUIDLE_FLAG_UNUSABLE;
+ goto unlock;
+ }
+
for_each_cpu(cpu, drv->cpumask) {
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
@@ -415,5 +422,8 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
}
+unlock:
+ spin_unlock(&cpuidle_driver_lock);
+
mutex_unlock(&cpuidle_lock);
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 425149e8bab0..57f6944d65a6 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -24,11 +24,14 @@
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
+#include <linux/pm_qos.h>
#include "governor.h"
#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>
+#define HZ_PER_KHZ 1000
+
static struct class *devfreq_class;
/*
@@ -99,6 +102,54 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq)
}
/**
+ * get_freq_range() - Get the current freq range
+ * @devfreq: the devfreq instance
+ * @min_freq: the min frequency
+ * @max_freq: the max frequency
+ *
+ * This takes into consideration all constraints.
+ */
+static void get_freq_range(struct devfreq *devfreq,
+ unsigned long *min_freq,
+ unsigned long *max_freq)
+{
+ unsigned long *freq_table = devfreq->profile->freq_table;
+ s32 qos_min_freq, qos_max_freq;
+
+ lockdep_assert_held(&devfreq->lock);
+
+ /*
+ * Initialize minimum/maximum frequency from freq table.
+ * The devfreq drivers can initialize this in either ascending or
+ * descending order and devfreq core supports both.
+ */
+ if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+ *min_freq = freq_table[0];
+ *max_freq = freq_table[devfreq->profile->max_state - 1];
+ } else {
+ *min_freq = freq_table[devfreq->profile->max_state - 1];
+ *max_freq = freq_table[0];
+ }
+
+ /* Apply constraints from PM QoS */
+ qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+ DEV_PM_QOS_MIN_FREQUENCY);
+ qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+ DEV_PM_QOS_MAX_FREQUENCY);
+ *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
+ if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
+ *max_freq = min(*max_freq,
+ (unsigned long)HZ_PER_KHZ * qos_max_freq);
+
+ /* Apply constraints from OPP interface */
+ *min_freq = max(*min_freq, devfreq->scaling_min_freq);
+ *max_freq = min(*max_freq, devfreq->scaling_max_freq);
+
+ if (*min_freq > *max_freq)
+ *min_freq = *max_freq;
+}
+
+/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
@@ -351,16 +402,7 @@ int update_devfreq(struct devfreq *devfreq)
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
return err;
-
- /*
- * Adjust the frequency with user freq, QoS and available freq.
- *
- * List from the highest priority
- * max_freq
- * min_freq
- */
- max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
- min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);
+ get_freq_range(devfreq, &min_freq, &max_freq);
if (freq < min_freq) {
freq = min_freq;
@@ -568,26 +610,69 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
void *devp)
{
struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
- int ret;
+ int err = -EINVAL;
mutex_lock(&devfreq->lock);
devfreq->scaling_min_freq = find_available_min_freq(devfreq);
- if (!devfreq->scaling_min_freq) {
- mutex_unlock(&devfreq->lock);
- return -EINVAL;
- }
+ if (!devfreq->scaling_min_freq)
+ goto out;
devfreq->scaling_max_freq = find_available_max_freq(devfreq);
if (!devfreq->scaling_max_freq) {
- mutex_unlock(&devfreq->lock);
- return -EINVAL;
+ devfreq->scaling_max_freq = ULONG_MAX;
+ goto out;
}
- ret = update_devfreq(devfreq);
+ err = update_devfreq(devfreq);
+
+out:
mutex_unlock(&devfreq->lock);
+ if (err)
+ dev_err(devfreq->dev.parent,
+ "failed to update frequency from OPP notifier (%d)\n",
+ err);
- return ret;
+ return NOTIFY_OK;
+}
+
+/**
+ * qos_notifier_call() - Common handler for QoS constraints.
+ * @devfreq: the devfreq instance.
+ */
+static int qos_notifier_call(struct devfreq *devfreq)
+{
+ int err;
+
+ mutex_lock(&devfreq->lock);
+ err = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ if (err)
+ dev_err(devfreq->dev.parent,
+ "failed to update frequency from PM QoS (%d)\n",
+ err);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * qos_min_notifier_call() - Callback for QoS min_freq changes.
+ * @nb: Should be devfreq->nb_min
+ */
+static int qos_min_notifier_call(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
+}
+
+/**
+ * qos_max_notifier_call() - Callback for QoS max_freq changes.
+ * @nb: Should be devfreq->nb_max
+ */
+static int qos_max_notifier_call(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
}
/**
@@ -599,16 +684,36 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
static void devfreq_dev_release(struct device *dev)
{
struct devfreq *devfreq = to_devfreq(dev);
+ int err;
mutex_lock(&devfreq_list_lock);
- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
- mutex_unlock(&devfreq_list_lock);
- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
- return;
- }
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
+ err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
+ DEV_PM_QOS_MAX_FREQUENCY);
+ if (err && err != -ENOENT)
+ dev_warn(dev->parent,
+ "Failed to remove max_freq notifier: %d\n", err);
+ err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
+ DEV_PM_QOS_MIN_FREQUENCY);
+ if (err && err != -ENOENT)
+ dev_warn(dev->parent,
+ "Failed to remove min_freq notifier: %d\n", err);
+
+ if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
+ err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
+ if (err)
+ dev_warn(dev->parent,
+ "Failed to remove max_freq request: %d\n", err);
+ }
+ if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
+ err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
+ if (err)
+ dev_warn(dev->parent,
+ "Failed to remove min_freq request: %d\n", err);
+ }
+
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
@@ -660,6 +765,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
+ INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
@@ -681,7 +787,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = -EINVAL;
goto err_dev;
}
- devfreq->min_freq = devfreq->scaling_min_freq;
devfreq->scaling_max_freq = find_available_max_freq(devfreq);
if (!devfreq->scaling_max_freq) {
@@ -689,7 +794,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = -EINVAL;
goto err_dev;
}
- devfreq->max_freq = devfreq->scaling_max_freq;
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
atomic_set(&devfreq->suspend_count, 0);
@@ -730,6 +834,28 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
+ err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+ if (err < 0)
+ goto err_devfreq;
+ err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ if (err < 0)
+ goto err_devfreq;
+
+ devfreq->nb_min.notifier_call = qos_min_notifier_call;
+ err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+ DEV_PM_QOS_MIN_FREQUENCY);
+ if (err)
+ goto err_devfreq;
+
+ devfreq->nb_max.notifier_call = qos_max_notifier_call;
+ err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+ DEV_PM_QOS_MAX_FREQUENCY);
+ if (err)
+ goto err_devfreq;
+
mutex_lock(&devfreq_list_lock);
governor = try_then_request_governor(devfreq->governor_name);
@@ -1303,41 +1429,37 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long value;
int ret;
+ /*
+ * Protect against theoretical sysfs writes between
+ * device_add and dev_pm_qos_add_request
+ */
+ if (!dev_pm_qos_request_active(&df->user_min_freq_req))
+ return -EAGAIN;
+
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
- mutex_lock(&df->lock);
-
- if (value) {
- if (value > df->max_freq) {
- ret = -EINVAL;
- goto unlock;
- }
- } else {
- unsigned long *freq_table = df->profile->freq_table;
-
- /* Get minimum frequency according to sorting order */
- if (freq_table[0] < freq_table[df->profile->max_state - 1])
- value = freq_table[0];
- else
- value = freq_table[df->profile->max_state - 1];
- }
+ /* Round down to kHz for PM QoS */
+ ret = dev_pm_qos_update_request(&df->user_min_freq_req,
+ value / HZ_PER_KHZ);
+ if (ret < 0)
+ return ret;
- df->min_freq = value;
- update_devfreq(df);
- ret = count;
-unlock:
- mutex_unlock(&df->lock);
- return ret;
+ return count;
}
static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct devfreq *df = to_devfreq(dev);
+ unsigned long min_freq, max_freq;
- return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
+ mutex_lock(&df->lock);
+ get_freq_range(df, &min_freq, &max_freq);
+ mutex_unlock(&df->lock);
+
+ return sprintf(buf, "%lu\n", min_freq);
}
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
@@ -1347,33 +1469,37 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long value;
int ret;
+ /*
+ * Protect against theoretical sysfs writes between
+ * device_add and dev_pm_qos_add_request
+ */
+ if (!dev_pm_qos_request_active(&df->user_max_freq_req))
+ return -EINVAL;
+
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
- mutex_lock(&df->lock);
-
- if (value) {
- if (value < df->min_freq) {
- ret = -EINVAL;
- goto unlock;
- }
- } else {
- unsigned long *freq_table = df->profile->freq_table;
+ /*
+ * PM QoS frequencies are in kHz so we need to convert. Convert by
+ * rounding upwards so that the acceptable interval never shrinks.
+ *
+ * For example if the user writes "666666666" to sysfs this value will
+ * be converted to 666667 kHz and back to 666667000 Hz before an OPP
+ * lookup, this ensures that an OPP of 666666666Hz is still accepted.
+ *
+ * A value of zero means "no limit".
+ */
+ if (value)
+ value = DIV_ROUND_UP(value, HZ_PER_KHZ);
+ else
+ value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- /* Get maximum frequency according to sorting order */
- if (freq_table[0] < freq_table[df->profile->max_state - 1])
- value = freq_table[df->profile->max_state - 1];
- else
- value = freq_table[0];
- }
+ ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
+ if (ret < 0)
+ return ret;
- df->max_freq = value;
- update_devfreq(df);
- ret = count;
-unlock:
- mutex_unlock(&df->lock);
- return ret;
+ return count;
}
static DEVICE_ATTR_RW(min_freq);
@@ -1381,8 +1507,13 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct devfreq *df = to_devfreq(dev);
+ unsigned long min_freq, max_freq;
+
+ mutex_lock(&df->lock);
+ get_freq_range(df, &min_freq, &max_freq);
+ mutex_unlock(&df->lock);
- return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
+ return sprintf(buf, "%lu\n", max_freq);
}
static DEVICE_ATTR_RW(max_freq);
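The min_freq_store()/max_freq_store() hunks above convert user-supplied Hz values to the kHz units PM QoS works in: the minimum rounds down and the maximum rounds up, so the permitted range can only widen, never shrink, across the conversion. A small sketch of that rounding using the 666666666 Hz value from the comment, with DIV_ROUND_UP spelled out locally for the example:

#include <stdio.h>

#define HZ_PER_KHZ         1000UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long hz = 666666666UL;

        unsigned long max_khz = DIV_ROUND_UP(hz, HZ_PER_KHZ); /* 666667 */
        unsigned long min_khz = hz / HZ_PER_KHZ;              /* 666666 */

        printf("max limit: %lu kHz (>= %lu Hz)\n", max_khz, hz);
        printf("min limit: %lu kHz (<= %lu Hz)\n", min_khz, hz);
        return 0;
}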
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 76fb072c22dc..5a5a1da01a00 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
a_fences = get_fences(a, &a_num_fences);
b_fences = get_fences(b, &b_num_fences);
if (a_num_fences > INT_MAX - b_num_fences)
- return NULL;
+ goto err;
num_fences = a_num_fences + b_num_fences;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index d101f072c8f8..407816da9fcb 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -681,7 +681,7 @@ device_initcall(efi_load_efivars);
{ name }, \
{ prop }, \
offsetof(struct efi_fdt_params, field), \
- FIELD_SIZEOF(struct efi_fdt_params, field) \
+ sizeof_field(struct efi_fdt_params, field) \
}
struct params {
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index d968c2471412..0d12ebf66174 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 2e98c016cb47..9375e7f12420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 2cdaf3b2a721..6614d8a6f4c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -604,11 +604,8 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
- for (i = 0; i < num_entities; i++) {
- mutex_lock(&ctx->adev->lock_reset);
+ for (i = 0; i < num_entities; i++)
drm_sched_entity_fini(&ctx->entities[0][i].entity);
- mutex_unlock(&ctx->adev->lock_reset);
- }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 16fbd2bc8ad1..4043ebcea5de 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -268,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
u32 tmp;
- /* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
- } else {
- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- tmp |= DF_V3_6_MGCG_DISABLE;
- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
- }
+ if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
+ /* Put DF on broadcast mode */
+ adev->df_funcs->enable_broadcast_mode(adev, true);
+
+ if (enable) {
+ tmp = RREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+ WREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ } else {
+ tmp = RREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V3_6_MGCG_DISABLE;
+ WREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ }
- /* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ /* Exit broadcast mode */
+ adev->df_funcs->enable_broadcast_mode(adev, false);
+ }
}
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f2c1b026397b..ba9e53a1abc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -117,10 +117,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -162,10 +165,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 983db77999e7..52a647d7022d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6146,7 +6146,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
- /* EVENT_WRITE_EOP - flush caches, send int */
+ /* Workaround for cache flush problems: first send a dummy EOP
+ * event down the pipe with a seq value one below the real one.
+ */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+ amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+ /* Then send the real EOP event down the pipe:
+ * EVENT_WRITE_EOP - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
@@ -6888,7 +6904,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
- 8 + /* FENCE for VM_FLUSH */
+ 12 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
the first COND_EXEC jump to the place just
@@ -6900,7 +6916,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
- 8 + 8 + /* FENCE x2 */
+ 12 + 12 + /* FENCE x2 */
2, /* SWITCH_BUFFER */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 232469507446..f5725336a5f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -219,6 +219,21 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
return req;
}
+/**
+ * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidate semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+ uint32_t vmhub)
+{
+ return ((vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) &&
+ (!amdgpu_sriov_vf(adev)));
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -229,6 +244,7 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
unsigned int vmhub, uint32_t flush_type)
{
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
/* Use register 17 for GART */
@@ -244,8 +260,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
*/
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1) {
+ if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
/* a read return value of 1 means semaphore acquire */
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -278,8 +293,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
}
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
@@ -369,6 +383,7 @@ error_alloc:
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
@@ -381,8 +396,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
*/
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
- ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/* a read return value of 1 means semaphore acquire */
amdgpu_ring_emit_reg_wait(ring,
hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -398,8 +412,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
req, 1 << vmid);
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
- ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3c355fb5d2b4..a5b68b5e452f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -416,6 +416,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
return req;
}
+/**
+ * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidate semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+ uint32_t vmhub)
+{
+ return ((vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) &&
+ (!amdgpu_sriov_vf(adev)) &&
+ (!(adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id < 0x8 &&
+ adev->pdev->device == 0x15d8)));
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -435,6 +453,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
+ bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
u32 j, tmp;
struct amdgpu_vmhub *hub;
@@ -468,8 +487,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1) {
+ if (use_semaphore) {
for (j = 0; j < adev->usec_timeout; j++) {
/* a read return value of 1 means semaphore acquire */
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -499,8 +517,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
@@ -518,6 +535,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
+ bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
@@ -531,8 +549,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
*/
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
- ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/* a read return value of 1 means semaphore acquire */
amdgpu_ring_emit_reg_wait(ring,
hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -548,8 +565,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
req, 1 << vmid);
/* TODO: Using the semaphore for GFXHUB as well still needs further debugging. */
- if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
- ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ if (use_semaphore)
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ba0e68057a89..b3672d10ea54 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
#
# Heterogeneous system architecture configuration
#
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 313183b80032..ae161fe86ebb 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
menu "Display Engine Configuration"
depends on DRM && DRM_AMDGPU
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 7873abea4112..5c3fcaa47410 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1625,6 +1625,7 @@ static enum bp_result construct_integrated_info(
/* Don't need to check major revision as they are all 1 */
switch (revision.minor) {
case 11:
+ case 12:
result = get_integrated_info_v11(bp, info);
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 790a2d211bd6..35c55e54eac0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
}
+static bool rn_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+
+ return true;
+}
+
+
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
.init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
- /* .dump_clk_registers = rn_dump_clk_registers, */
+ .are_clock_states_equal = rn_are_clock_states_equal,
.notify_wm_ranges = rn_notify_wm_ranges
};
@@ -518,36 +534,83 @@ struct clk_bw_params rn_bw_params = {
.num_entries = 4,
},
- .wm_table = {
- .entries = {
- {
- .wm_inst = WM_A,
- .wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .valid = true,
- },
- {
- .wm_inst = WM_B,
- .wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .valid = true,
- },
- {
- .wm_inst = WM_C,
- .wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .valid = true,
- },
- {
- .wm_inst = WM_D,
- .wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .valid = true,
- },
+};
+
+struct wm_table ddr4_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 6.09,
+ .sr_enter_plus_exit_time_us = 7.14,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
},
}
};
+struct wm_table lpddr4_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 23.84,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 23.84,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 23.84,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 23.84,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
+ .valid = true,
+ },
+ }
+};
+
+
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
@@ -561,7 +624,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi
return 0;
}
-static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
{
int i, j = 0;
@@ -593,8 +656,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
- bw_params->vram_type = asic_id->vram_type;
- bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
+ bw_params->vram_type = bios_info->memory_type;
+ bw_params->num_channels = bios_info->ma_channel_number;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -669,15 +732,24 @@ void rn_clk_mgr_construct(
ASSERT(clk_mgr->base.dprefclk_khz == 600000);
clk_mgr->base.dprefclk_khz = 600000;
}
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+ rn_bw_params.wm_table = lpddr4_wm_table;
+ } else {
+ rn_bw_params.wm_table = ddr4_wm_table;
+ }
}
dce_clock_read_ss_info(clk_mgr);
+
clk_mgr->base.bw_params = &rn_bw_params;
if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ }
}
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 12ba6fdf89b7..62d8289abb4e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
if (GPIO_RESULT_OK != dal_ddc_open(
ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
- dal_gpio_destroy_ddc(&ddc);
+ dal_ddc_close(ddc);
return present;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 7f904d55c1bc..81789191d4ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -586,7 +586,7 @@ bool dal_ddc_service_query_ddc_data(
bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
struct aux_payload *payload)
{
- uint8_t retrieved = 0;
+ uint32_t retrieved = 0;
bool ret = 0;
if (!ddc)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 0f59b68aa4c2..504055fc70e8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3522,7 +3522,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
if (link_enc->funcs->fec_set_enable &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
if (link->fec_state == dc_link_fec_ready && enable) {
- msleep(1);
+ /* According to the DP spec, the FEC enable sequence may first
+ * be transmitted any time after 1000 LL codes have been
+ * transmitted on the link following link training completion.
+ * 1-lane RBR gives the longest time needed to transmit 1000 LL
+ * codes, which is 6.173 us, so use a 7 microsecond delay instead.
+ */
+ udelay(7);
link_enc->funcs->fec_set_enable(link_enc, true);
link->fec_state = dc_link_fec_enabled;
} else if (link->fec_state == dc_link_fec_enabled && !enable) {
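As a rough, illustrative sanity check of the 6.173 us figure in the comment above (this assumes the "LL codes" are 10-bit link symbols and that RBR runs at 1.62 Gbps per lane with 8b/10b encoding; it is a standalone sketch, not part of the patch):

/* Illustrative only: time to transmit 1000 link symbols on 1-lane RBR. */
#include <stdio.h>

int main(void)
{
	double symbols_per_sec = 1.62e9 / 10.0;       /* 8b/10b: 10 bits per symbol -> 162e6 symbols/s */
	double t_us = 1000.0 / symbols_per_sec * 1e6; /* time for 1000 symbols, in microseconds */

	printf("1000 LL codes at 1-lane RBR: %.3f us\n", t_us); /* prints ~6.173 */
	return 0;
}

Rounding 6.173 us up to 7 us gives the udelay(7) used above a small safety margin.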
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index e472608faf33..793c0cec407f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -583,6 +583,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
uint8_t reply;
bool payload_reply = true;
enum aux_channel_operation_result operation_result;
+ bool retry_on_defer = false;
+
int aux_ack_retries = 0,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
@@ -613,8 +615,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
- case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ retry_on_defer = true;
+ /* fall through */
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
} else {
@@ -647,15 +651,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
- if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
- goto fail;
- else {
- /*
- * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
- * According to the DP spec there should be 3 retries total
- * with a 400us wait in between each. Hardware already waits
- * for 550us therefore no wait is required here.
- */
+ // Check whether a DEFER had occurred before the timeout.
+ // If so, treat timeout as a DEFER.
+ if (retry_on_defer) {
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ else if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ } else {
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+ * with a 400us wait in between each. Hardware already waits
+ * for 550us therefore no wait is required here.
+ */
+ }
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 63f3bddba7da..10b47986526b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 09793336d84f..23ff2f1c75b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -923,7 +923,9 @@ static const struct resource_caps res_cap_nv14 = {
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 5,
+#endif
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -1536,13 +1538,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
{
int i;
ASSERT(*dsc == NULL);
*dsc = NULL;
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
@@ -1585,7 +1594,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
+ acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1785,7 +1794,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
+ acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 4b3401616434..fcb3877b4fcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank(
DP_VID_N_MUL, n_multiply);
}
- /* set DIG_START to 0x1 to reset FIFO */
+ /* make sure stream is disabled before resetting steer fifo */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
+ /* set DIG_START to 0x1 to reset FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
/* write 0 to take the FIFO out of reset */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
- /* switch DP encoder to CRTC data */
+ /* Switch the DP encoder to CRTC data, but reset the fifo first: it may
+ * overflow during a mode transition and sometimes doesn't recover.
+ */
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+ udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 14113ccf498d..5b8c17564bc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for DCN21.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 459bd9a5caed..b29b2c99a564 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
@@ -257,7 +259,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3550,
+ .dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
@@ -1000,6 +1002,8 @@ static void calculate_wm_set_for_vlevel(
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
@@ -1017,14 +1021,21 @@ static void calculate_wm_set_for_vlevel(
static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
+ int i;
+
kernel_fpu_begin();
if (dc->bb_overrides.sr_exit_time_ns) {
- bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
}
if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- bb->sr_enter_plus_exit_time_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
}
if (dc->bb_overrides.urgent_latency_ns) {
@@ -1032,9 +1043,12 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
}
if (dc->bb_overrides.dram_clock_change_latency_ns) {
- bb->dram_clock_change_latency_us =
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
}
+
kernel_fpu_end();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 970737217e53..641ffb7cfaed 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 4e18e77dcf42..026e6a2a2c44 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -69,6 +69,8 @@ struct wm_range_table_entry {
unsigned int wm_inst;
unsigned int wm_type;
double pstate_latency_us;
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
bool valid;
};
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index bb012cb1a9f5..c7fbb9c3ad6b 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -42,7 +42,7 @@ struct aux_payload {
bool write;
bool mot;
uint32_t address;
- uint8_t length;
+ uint32_t length;
uint8_t *data;
/*
* used to return the reply type of the transaction
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 16e69bbc69aa..5437b50e9f90 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_MAX_MARGIN 2500
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
+ unsigned int min_frame_duration_in_ns = 0;
+ unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
- unsigned int max_render_time_in_us =
- in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ in_out_vrr->max_refresh_in_uhz)));
/* Program BTR */
- if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
+ if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
+ } else if (last_render_time_in_us > max_render_time_in_us) {
/* Enter Below the Range */
- if (!in_out_vrr->btr.btr_active) {
- in_out_vrr->btr.btr_active = true;
- }
+ in_out_vrr->btr.btr_active = true;
}
/* BTR set to "not active" so disengage */
@@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
- (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
- mid_point_frames_floor < 2)) {
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- max_render_time_in_us) &&
+ in_out_vrr->max_duration_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
- in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
- 2 * in_out_vrr->min_duration_in_us;
- if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
- in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
in_out_vrr->supported = true;
}
@@ -816,7 +811,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
-
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
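A quick, illustrative unit check of the min_frame_duration_in_ns expression added to apply_below_the_range() above (assuming a 60 Hz panel and that max_refresh_in_uhz holds the refresh rate in micro-hertz; this is a standalone sketch, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_refresh_in_uhz = 60ULL * 1000000;  /* 60 Hz expressed in uHz */
	uint64_t min_frame_duration_in_ns =
		(1000000000ULL * 1000000) / max_refresh_in_uhz;

	/* 1e15 / 6e7 = 16666666 ns, i.e. ~16.67 ms per frame, as expected for 60 Hz */
	printf("min frame duration: %llu ns\n",
	       (unsigned long long)min_frame_duration_in_ns);
	return 0;
}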
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dbe7835aabcf..dc187844d10b 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,7 +92,6 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
- uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index ce3566ca3e24..cc71a1078a7a 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1313,12 +1313,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
"VR",
"COMPUTE",
"CUSTOM"};
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)"};
uint32_t i, size = 0;
int16_t workload_type = 0;
if (!smu->pm_enabled || !buf)
return -EINVAL;
+ size += sprintf(buf + size, "%16s\n",
+ title[0]);
+
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c7c2b349858d..2a27fb5d7dc6 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3986,6 +3986,7 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ crtc_state->cpu_transcoder,
(u8)conn_state->hdcp_content_type);
}
@@ -4089,7 +4090,9 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED ||
content_protection_type_changed)
- intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
+ intel_hdcp_enable(connector,
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 050655a1a3d8..b05b2191b919 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2414,9 +2414,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
- intel_hdcp_transcoder_config(intel_connector,
- pipe_config->cpu_transcoder);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 3111ecaeabd0..20616639b8ab 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
return 0;
/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 0;
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index f1f41ca8402b..a448815d8fc2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1821,23 +1821,6 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
}
-void intel_hdcp_transcoder_config(struct intel_connector *connector,
- enum transcoder cpu_transcoder)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
-
- if (!hdcp->shim)
- return;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- mutex_lock(&hdcp->mutex);
- hdcp->cpu_transcoder = cpu_transcoder;
- hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
- mutex_unlock(&hdcp->mutex);
- }
-}
-
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
@@ -1959,8 +1942,10 @@ int intel_hdcp_init(struct intel_connector *connector,
return 0;
}
-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
+int intel_hdcp_enable(struct intel_connector *connector,
+ enum transcoder cpu_transcoder, u8 content_type)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
int ret = -EINVAL;
@@ -1972,6 +1957,11 @@ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ hdcp->cpu_transcoder = cpu_transcoder;
+ hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ }
+
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
* is capable of HDCP2.2, it is preferred to use HDCP2.2.
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 41c1053d9e38..f3c3272e712a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -21,11 +21,10 @@ enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-void intel_hdcp_transcoder_config(struct intel_connector *connector,
- enum transcoder cpu_transcoder);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
+int intel_hdcp_enable(struct intel_connector *connector,
+ enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index f6f5312205c4..f56fffc474fa 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2489,9 +2489,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
- intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
- pipe_config->cpu_transcoder);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9fdefbdc3546..75dd0e0367b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -845,12 +845,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
}
}
-static void unwind_wa_tail(struct i915_request *rq)
-{
- rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
- assert_ring_tail_valid(rq->ring, rq->tail);
-}
-
static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
@@ -863,12 +857,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests,
sched.link) {
-
if (i915_request_completed(rq))
continue; /* XXX */
__i915_request_unsubmit(rq);
- unwind_wa_tail(rq);
/*
* Push the request back into the queue for later resubmission.
@@ -1161,13 +1153,29 @@ execlists_schedule_out(struct i915_request *rq)
i915_request_put(rq);
}
-static u64 execlists_update_context(const struct i915_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->hw_context;
- u64 desc;
+ u64 desc = ce->lrc_desc;
+ u32 tail;
- ce->lrc_reg_state[CTX_RING_TAIL] =
- intel_ring_set_tail(rq->ring, rq->tail);
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ */
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
/*
* Make sure the context image is complete before we submit it to HW.
@@ -1186,13 +1194,11 @@ static u64 execlists_update_context(const struct i915_request *rq)
*/
mb();
- desc = ce->lrc_desc;
- ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
-
/* Wa_1607138340:tgl */
if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
}
@@ -1703,16 +1709,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
return;
}
-
- /*
- * WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == rq:TAIL as we resubmit the
- * request. See gen8_emit_fini_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
}
}
@@ -4120,17 +4116,18 @@ static void virtual_context_destroy(struct kref *kref)
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
+ unsigned long flags;
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irq(&sibling->active.lock);
+ spin_lock_irqsave(&sibling->active.lock, flags);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irq(&sibling->active.lock);
+ spin_unlock_irqrestore(&sibling->active.lock, flags);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9eb6b3149b7..d034fa413164 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,6 +45,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -1053,6 +1054,18 @@ out:
return err;
}
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ intel_context_timeline_unlock(tl);
+ return 0;
+}
+
static int __intel_engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -1121,13 +1134,20 @@ err_rq:
if (!rq)
continue;
- /* We want to be able to unbind the state from the GGTT */
- GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
+ GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
+ &rq->hw_context->flags));
state = rq->hw_context->state;
if (!state)
continue;
+ /* Serialise with retirement on another CPU */
+ err = __intel_context_flush_retire(rq->hw_context);
+ if (err)
+ goto out;
+
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 65d7c2e599de..2ae14bc14931 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2078,20 +2078,12 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
u32 *reg_state = ce->lrc_reg_state;
int i;
- if (IS_GEN(stream->perf->i915, 12)) {
- u32 format = stream->oa_buffer.format;
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
- reg_state[ctx_oactxctrl + 1] =
- (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
- (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
- } else {
- reg_state[ctx_oactxctrl + 1] =
- (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME;
- }
-
- for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
+ for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
reg_state[ctx_flexeu0 + i * 2 + 1] =
oa_config_flex_reg(stream->oa_config, flex_regs[i]);
@@ -2224,34 +2216,51 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
-static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
{
- struct i915_request *rq;
- u32 *cs;
- int err = 0;
-
- rq = i915_request_create(ce);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out;
- }
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
- *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
- *cs++ = MI_NOOP;
+ int err;
+ struct intel_context *ce = stream->pinned_ctx;
+ u32 format = stream->oa_buffer.format;
+ struct flex regs_context[] = {
+ {
+ GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
+ enable ? GEN8_OA_COUNTER_RESUME : 0,
+ },
+ };
+ /* Offsets in regs_lri are not used since this configuration is only
+ * applied using LRI. Initialize the correct offsets for posterity.
+ */
+#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
+ struct flex regs_lri[] = {
+ {
+ GEN12_OAR_OACONTROL,
+ GEN12_OAR_OACONTROL_OFFSET + 1,
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ },
+ {
+ RING_CONTEXT_CONTROL(ce->engine->mmio_base),
+ CTX_CONTEXT_CONTROL,
+ _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ enable ?
+ GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
+ 0)
+ },
+ };
- intel_ring_advance(rq, cs);
+ /* Modify the context image of the pinned context with regs_context */
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ return err;
-out:
- i915_request_add(rq);
+ err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
+ intel_context_unlock_pinned(ce);
+ if (err)
+ return err;
- return err;
+ /* Apply regs_lri using LRI with pinned context */
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
}
/*
@@ -2277,53 +2286,16 @@ out:
* per-context OA state.
*
* Note: it's only the RCS/Render context that has any OA state.
+ * Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs)
{
struct drm_i915_private *i915 = stream->perf->i915;
- /* The MMIO offsets for Flex EU registers aren't contiguous */
- const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
-#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
- struct flex regs[] = {
- {
- GEN8_R_PWR_CLK_STATE,
- CTX_R_PWR_CLK_STATE,
- },
- {
- IS_GEN(i915, 12) ?
- GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
- stream->perf->ctx_oactxctrl_offset + 1,
- },
- { EU_PERF_CNTL0, ctx_flexeuN(0) },
- { EU_PERF_CNTL1, ctx_flexeuN(1) },
- { EU_PERF_CNTL2, ctx_flexeuN(2) },
- { EU_PERF_CNTL3, ctx_flexeuN(3) },
- { EU_PERF_CNTL4, ctx_flexeuN(4) },
- { EU_PERF_CNTL5, ctx_flexeuN(5) },
- { EU_PERF_CNTL6, ctx_flexeuN(6) },
- };
-#undef ctx_flexeuN
struct intel_engine_cs *engine;
struct i915_gem_context *ctx, *cn;
- size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
- int i, err;
-
- if (IS_GEN(i915, 12)) {
- u32 format = stream->oa_buffer.format;
-
- regs[1].value =
- (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
- (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
- } else {
- regs[1].value =
- (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME;
- }
-
- for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
- regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+ int err;
lockdep_assert_held(&stream->perf->lock);
@@ -2353,7 +2325,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
spin_unlock(&i915->gem.contexts.lock);
- err = gen8_configure_context(ctx, regs, array_size);
+ err = gen8_configure_context(ctx, regs, num_regs);
if (err) {
i915_gem_context_put(ctx);
return err;
@@ -2378,7 +2350,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, array_size);
+ err = gen8_modify_self(ce, regs, num_regs);
if (err)
return err;
}
@@ -2386,6 +2358,56 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
+static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
+{
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ };
+
+ return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
+{
+ /* The MMIO offsets for Flex EU registers aren't contiguous */
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ {
+ GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
+ },
+ { EU_PERF_CNTL0, ctx_flexeuN(0) },
+ { EU_PERF_CNTL1, ctx_flexeuN(1) },
+ { EU_PERF_CNTL2, ctx_flexeuN(2) },
+ { EU_PERF_CNTL3, ctx_flexeuN(3) },
+ { EU_PERF_CNTL4, ctx_flexeuN(4) },
+ { EU_PERF_CNTL5, ctx_flexeuN(5) },
+ { EU_PERF_CNTL6, ctx_flexeuN(6) },
+ };
+#undef ctx_flexeuN
+ int i;
+
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+
+ for (i = 2; i < ARRAY_SIZE(regs); i++)
+ regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+ return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2464,7 +2486,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = lrc_configure_all_contexts(stream, oa_config);
+ ret = gen12_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
@@ -2474,8 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
* requested this.
*/
if (stream->ctx) {
- ret = gen12_emit_oar_config(stream->pinned_ctx,
- oa_config != NULL);
+ ret = gen12_configure_oar_context(stream, true);
if (ret)
return ret;
}
@@ -2509,11 +2530,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ gen12_configure_all_contexts(stream, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
- gen12_emit_oar_config(stream->pinned_ctx, false);
+ gen12_configure_oar_context(stream, false);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2713,7 +2734,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+ if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
+ (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
DRM_DEBUG("Only OA report sampling supported\n");
return -EINVAL;
}
@@ -2745,7 +2767,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
format_size = perf->oa_formats[props->oa_format].size;
- stream->sample_flags |= SAMPLE_OA_REPORT;
+ stream->sample_flags = props->sample_flags;
stream->sample_size += format_size;
stream->oa_buffer.format_size = format_size;
@@ -2854,7 +2876,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
return;
stream = engine->i915->perf.exclusive_stream;
- if (stream)
+ /*
+ * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
+ * is already doing that, so nothing to be done for gen12 here.
+ */
+ if (stream && INTEL_GEN(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index d6214d3c8b33..ef4c630afe3f 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
for_each_available_child_of_node(dev->of_node, child) {
panel = of_drm_find_panel(child);
if (IS_ERR(panel)) {
- dev_err(dev, "failed to find panel try bridge (%lu)\n",
+ dev_err(dev, "failed to find panel try bridge (%ld)\n",
PTR_ERR(panel));
+ panel = NULL;
+
bridge = of_drm_find_bridge(child);
if (IS_ERR(bridge)) {
- dev_err(dev, "failed to find bridge (%lu)\n",
+ dev_err(dev, "failed to find bridge (%ld)\n",
PTR_ERR(bridge));
return PTR_ERR(bridge);
}
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 9ab27aecfcf3..1bd6b6d15ffb 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
},
};
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_match(req_mode, &meson_mode->mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS))
+ return meson_mode;
+ }
+
+ return NULL;
+}
+
/* Connector */
static void meson_cvbs_connector_destroy(struct drm_connector *connector)
@@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- int i;
-
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
- if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
- return 0;
- }
+ if (meson_cvbs_get_mode(&crtc_state->mode))
+ return 0;
return -EINVAL;
}
@@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
struct meson_venc_cvbs *meson_venc_cvbs =
encoder_to_meson_venc_cvbs(encoder);
struct meson_drm *priv = meson_venc_cvbs->priv;
- int i;
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+ if (meson_mode) {
+ meson_venci_cvbs_mode_set(priv, meson_mode->enci);
- if (drm_mode_equal(mode, &meson_mode->mode)) {
- meson_venci_cvbs_mode_set(priv,
- meson_mode->enci);
-
- /* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, true);
- break;
- }
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index d43951caeea0..b113876c2428 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -30,9 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
static struct drm_driver driver;
static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
- { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 43df86c38f58..24f7700768da 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -114,6 +114,7 @@ struct nv50_head_atom {
u8 nhsync:1;
u8 nvsync:1;
u8 depth:4;
+ u8 bpc;
} or;
/* Currently only used for MST */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 549486f1d937..63425e246018 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
* same size as the native one (e.g. different
* refresh rate)
*/
- if (adjusted_mode->hdisplay == native_mode->hdisplay &&
- adjusted_mode->vdisplay == native_mode->vdisplay &&
- adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay &&
+ mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
@@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct nouveau_connector *nv_connector =
- nouveau_connector(conn_state->connector);
- return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
- nv_connector->native_mode);
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
+ if (ret)
+ return ret;
+
+ if (crtc_state->mode_changed || crtc_state->connectors_changed)
+ asyh->or.bpc = connector->display_info.bpc;
+
+ return 0;
}
/******************************************************************************
@@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+ if (ret)
+ return ret;
+
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ return 0;
+
+ /*
+ * When restoring duplicated states, we need to make sure that the bw
+ * remains the same and avoid recalculating it, as the connector's bpc
+ * may have changed after the state was duplicated
+ */
+ if (!state->duplicated) {
+ const int clock = crtc_state->adjusted_mode.clock;
- if (crtc_state->mode_changed || crtc_state->connectors_changed) {
/*
- * When restoring duplicated states, we need to make sure that
- * the bw remains the same and avoid recalculating it, as the
- * connector's bpc may have changed after the state was
- * duplicated
+ * XXX: Since we don't use HDR in userspace quite yet, limit
+ * the bpc to 8 to save bandwidth on the topology. In the
+ * future, we'll want to properly fix this by dynamically
+ * selecting the highest possible bpc that would fit in the
+ * topology
*/
- if (!state->duplicated) {
- const int bpp = connector->display_info.bpc * 3;
- const int clock = crtc_state->adjusted_mode.clock;
+ asyh->or.bpc = min(connector->display_info.bpc, 8U);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+ }
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
- }
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
+ asyh->dp.pbn);
+ if (slots < 0)
+ return slots;
- slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
- mstc->port,
- asyh->dp.pbn);
- if (slots < 0)
- return slots;
+ asyh->dp.tu = slots;
- asyh->dp.tu = slots;
- }
+ return 0;
+}
- return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
- mstc->native);
+static u8
+nv50_dp_bpc_to_depth(unsigned int bpc)
+{
+ switch (bpc) {
+ case 6: return 0x2;
+ case 8: return 0x5;
+ case 10: /* fall-through */
+ default: return 0x6;
+ }
}
static void
@@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- u8 proto, depth;
+ u8 proto;
bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
@@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
else
proto = 0x9;
- switch (mstc->connector.display_info.bpc) {
- case 6: depth = 0x2; break;
- case 8: depth = 0x5; break;
- case 10:
- default: depth = 0x6; break;
- }
-
- mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
+ mstm->outp->update(mstm->outp, head->base.index, armh, proto,
+ nv50_dp_bpc_to_depth(armh->or.bpc));
msto->head = head;
msto->mstc = mstc;
@@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder)
lvds.lvds.script |= 0x0200;
}
- if (nv_connector->base.display_info.bpc == 8)
+ if (asyh->or.bpc == 8)
lvds.lvds.script |= 0x0200;
}
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6)
- depth = 0x2;
- else
- if (nv_connector->base.display_info.bpc == 8)
- depth = 0x5;
- else
- depth = 0x6;
+ depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->link & 1)
proto = 0x8;
@@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
nv50_outp_acquire(nv_encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_connector->base.display_info.bpc) {
+ switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
case 6: asyh->or.depth = 0x2; break;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 71c23bf1fe25..c9692df2b76c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
struct nv50_head_atom *asyh,
struct nouveau_conn_atom *asyc)
{
- struct drm_connector *connector = asyc->state.connector;
u32 mode = 0x00;
if (asyc->dither.mode == DITHERING_MODE_AUTO) {
- if (asyh->base.depth > connector->display_info.bpc * 3)
+ if (asyh->base.depth > asyh->or.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = asyc->dither.mode;
}
if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
+ if (asyh->or.bpc >= 8)
mode |= DITHERING_DEPTH_8BPC;
} else {
mode |= asyc->dither.depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5b413588b823..9a9a7f5003d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
void
nouveau_conn_reset(struct drm_connector *connector)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_conn_atom *asyc;
- if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
- return;
+ if (drm_drv_uses_atomic_modeset(connector->dev)) {
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ nouveau_conn_atomic_destroy_state(connector,
+ connector->state);
+
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ } else {
+ asyc = &nv_connector->properties_state;
+ }
- if (connector->state)
- nouveau_conn_atomic_destroy_state(connector, connector->state);
- __drm_atomic_helper_connector_reset(connector, &asyc->state);
asyc->dither.mode = DITHERING_MODE_AUTO;
asyc->dither.depth = DITHERING_DEPTH_AUTO;
asyc->scaler.mode = DRM_MODE_SCALE_NONE;
@@ -276,8 +284,14 @@ void
nouveau_conn_attach_properties(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_conn_atom *armc;
+
+ if (drm_drv_uses_atomic_modeset(connector->dev))
+ armc = nouveau_conn_atom(connector->state);
+ else
+ armc = &nv_connector->properties_state;
/* Init DVI-I specific properties. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
@@ -748,9 +762,9 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index f43a8d63aef8..de84fb4708c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -29,6 +29,7 @@
#include <nvif/notify.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
@@ -44,6 +45,60 @@ struct dcb_output;
struct nouveau_backlight;
#endif
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -63,6 +118,12 @@ struct nouveau_connector {
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight *backlight;
#endif
+ /*
+ * Our connector property code expects a nouveau_conn_atom struct
+ * even on pre-nv50 where we do not support atomic. This embedded
+ * version gets used in the non atomic modeset case.
+ */
+ struct nouveau_conn_atom properties_state;
};
static inline struct nouveau_connector *nouveau_connector(
@@ -121,61 +182,6 @@ extern int nouveau_ignorelid;
extern int nouveau_duallink;
extern int nouveau_hdmimhz;
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p) \
- container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
- struct drm_connector_state state;
-
- struct {
- /* The enum values specifically defined here match nv50/gf119
- * hw values, and the code relies on this.
- */
- enum {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
- } mode;
- enum {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
- } depth;
- } dither;
-
- struct {
- int mode; /* DRM_MODE_SCALE_* */
- struct {
- enum {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
- } mode;
- u32 hborder;
- u32 vborder;
- } underscan;
- bool full;
- } scaler;
-
- struct {
- int color_vibrance;
- int vibrant_hue;
- } procamp;
-
- union {
- struct {
- bool dither:1;
- bool scaler:1;
- bool procamp:1;
- };
- u8 mask;
- } set;
-};
-
void nouveau_conn_attach_properties(struct drm_connector *);
void nouveau_conn_reset(struct drm_connector *);
struct drm_connector_state *
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 4c4e8a30a1ac..536ba93b0f46 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -18,15 +18,18 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
int err;
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
err = dev_pm_opp_set_rate(dev, *freq);
if (err)
return err;
- *freq = clk_get_rate(pfdev->clock);
-
return 0;
}
@@ -60,20 +63,10 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
return 0;
}
-static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
-
- *freq = clk_get_rate(pfdev->clock);
-
- return 0;
-}
-
static struct devfreq_dev_profile panfrost_devfreq_profile = {
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
- .get_cur_freq = panfrost_devfreq_get_cur_freq,
};
int panfrost_devfreq_init(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 9458dc6c750c..f61364f7c471 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -303,14 +303,17 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
}
/* Don't allow mmapping of heap objects as pages are not pinned. */
- if (to_panfrost_bo(gem_obj)->is_heap)
- return -EINVAL;
+ if (to_panfrost_bo(gem_obj)->is_heap) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_put_unlocked(gem_obj);
+out:
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
@@ -347,20 +350,19 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ mutex_lock(&pfdev->shrinker_lock);
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
- mutex_lock(&pfdev->shrinker_lock);
-
if (args->madv == PANFROST_MADV_DONTNEED)
- list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+ list_add_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
-
- mutex_unlock(&pfdev->shrinker_lock);
}
+ mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
return 0;
@@ -443,7 +445,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
- panfrost_perfcnt_close(panfrost_priv);
+ panfrost_perfcnt_close(file);
panfrost_job_close(panfrost_priv);
panfrost_mmu_pgtable_free(panfrost_priv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index deca0c30bbd4..fd766b1395fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,6 +19,16 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
+ /*
+ * Make sure the BO is no longer inserted in the shrinker list before
+ * taking care of the destruction itself. If we don't do that we have a
+ * race condition between this function and what's done in
+ * panfrost_gem_shrinker_scan().
+ */
+ mutex_lock(&pfdev->shrinker_lock);
+ list_del_init(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -33,15 +43,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
kfree(bo->sgts);
}
- mutex_lock(&pfdev->shrinker_lock);
- if (!list_empty(&bo->base.madv_list))
- list_del(&bo->base.madv_list);
- mutex_unlock(&pfdev->shrinker_lock);
-
drm_gem_shmem_free_object(obj);
}
-static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
size_t size = obj->size;
@@ -80,7 +85,7 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
return ret;
}
-static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 50920819cc16..4b17e7308764 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -45,6 +45,10 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
u32 flags,
uint32_t *handle);
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void panfrost_gem_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2dba192bf198..2c04e858c50a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -67,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
}
static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user,
+ struct drm_file *file_priv,
unsigned int counterset)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
u32 cfg;
@@ -91,14 +92,14 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
perfcnt->bo = to_panfrost_bo(&bo->base);
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_mmu_map(perfcnt->bo);
+ ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
if (ret)
goto err_put_bo;
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_put_bo;
+ goto err_close_bo;
}
/*
@@ -157,14 +158,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
err_vunmap:
drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+err_close_bo:
+ panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
}
static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user)
+ struct drm_file *file_priv)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
if (user != perfcnt->user)
@@ -180,6 +184,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
perfcnt->user = NULL;
drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
perfcnt->buf = NULL;
+ panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
perfcnt->bo = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
@@ -191,7 +196,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
@@ -207,10 +211,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
mutex_lock(&perfcnt->lock);
if (req->enable)
- ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+ ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
req->counterset);
else
- ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+ ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
return ret;
@@ -248,15 +252,16 @@ out:
return ret;
}
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
{
+ struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
pm_runtime_get_sync(pfdev->dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
- panfrost_perfcnt_disable_locked(pfdev, pfile);
+ panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
index 13b8fdaa1b43..8bbcf5f5fb33 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
int panfrost_perfcnt_init(struct panfrost_device *pfdev);
void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9333c865d4a9..9f8dcd3f8385 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -896,29 +896,6 @@ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address
}
EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
-/**
- * i2c_new_dummy - return a new i2c device bound to a dummy driver
- * @adapter: the adapter managing the device
- * @address: seven bit address to be used
- * Context: can sleep
- *
- * This deprecated function has the same functionality as @i2c_new_dummy_device,
- * it just returns NULL instead of an ERR_PTR in case of an error for
- * compatibility with current I2C API. It will be removed once all users are
- * converted.
- *
- * This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
- */
-struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
-{
- struct i2c_client *ret;
-
- ret = i2c_new_dummy_device(adapter, address);
- return IS_ERR(ret) ? NULL : ret;
-}
-EXPORT_SYMBOL_GPL(i2c_new_dummy);
-
struct i2c_dummy_devres {
struct i2c_client *client;
};
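
Aside (not part of the patch above): the removed i2c_new_dummy() was only a compatibility shim around i2c_new_dummy_device() — same work, but callers got NULL instead of an ERR_PTR on failure. A minimal userspace sketch of that shim pattern, with simplified stand-ins for the kernel's ERR_PTR/IS_ERR helpers; the new_thing()/new_thing_compat() names are invented:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* simplified userspace stand-ins for the kernel's ERR_PTR/IS_ERR helpers */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }

static void *new_thing(int fail)
{
	/* modern interface: report the precise error as an ERR_PTR */
	return fail ? ERR_PTR(-ENOMEM) : (void *)"object";
}

static void *new_thing_compat(int fail)
{
	/* legacy interface: collapse any error to NULL, as i2c_new_dummy() did */
	void *p = new_thing(fail);

	return IS_ERR(p) ? NULL : p;
}

int main(void)
{
	printf("ok: %s, failed: %p\n",
	       (char *)new_thing_compat(0), new_thing_compat(1));
	return 0;
}

Once every in-tree caller uses the ERR_PTR variant, the shim can be deleted, which is what the hunk above does.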
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 7b837641f166..7320275c7e56 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -992,6 +992,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#define ST_ACCEL_TRIGGER_OPS NULL
#endif
+#ifdef CONFIG_ACPI
static const struct iio_mount_matrix *
get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
@@ -1012,7 +1013,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = {
static int apply_acpi_orientation(struct iio_dev *indio_dev,
struct iio_chan_spec *channels)
{
-#ifdef CONFIG_ACPI
struct st_sensor_data *adata = iio_priv(indio_dev);
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_device *adev;
@@ -1140,10 +1140,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
out:
kfree(buffer.pointer);
return ret;
+}
#else /* !CONFIG_ACPI */
+static int apply_acpi_orientation(struct iio_dev *indio_dev,
+ struct iio_chan_spec *channels)
+{
return 0;
-#endif
}
+#endif
/*
* st_accel_get_settings() - get sensor settings from device name
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index edc6f1cc90b2..3f03abf100b5 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -39,6 +39,8 @@
#define AD7124_STATUS_POR_FLAG_MSK BIT(4)
/* AD7124_ADC_CONTROL */
+#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8)
+#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
#define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6)
#define AD7124_ADC_CTRL_PWR(x) FIELD_PREP(AD7124_ADC_CTRL_PWR_MSK, x)
#define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2)
@@ -424,7 +426,10 @@ static int ad7124_init_channel_vref(struct ad7124_state *st,
break;
case AD7124_INT_REF:
st->channel_config[channel_number].vref_mv = 2500;
- break;
+ st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK;
+ st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
+ return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL,
+ 2, st->adc_control);
default:
dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
return -EINVAL;
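
Aside (not part of the patch above): the AD7124 hunk enables the internal reference by clearing and re-setting one register field with BIT()/FIELD_PREP()-style helpers. A minimal sketch of that read-modify-write pattern; the GENMASK/FIELD_PREP macros below are simplified stand-ins for the kernel versions, and the register value is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's BIT/GENMASK/FIELD_PREP helpers
 * (GENMASK here assumes a 64-bit unsigned long) */
#define BIT(n)                (1UL << (n))
#define GENMASK(h, l)         (((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_PREP(mask, val) (((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

#define CTRL_REF_EN_MSK  BIT(8)         /* hypothetical field layout */
#define CTRL_PWR_MSK     GENMASK(7, 6)

int main(void)
{
	uint16_t adc_control = 0x0040;  /* hypothetical current register value */

	/* clear the field, then set it to 1 - the same pattern as the patch */
	adc_control &= ~CTRL_REF_EN_MSK;
	adc_control |= FIELD_PREP(CTRL_REF_EN_MSK, 1);

	printf("ADC_CONTROL = 0x%04x\n", adc_control);  /* prints 0x0140 */
	return 0;
}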
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index f5ba94c03a8d..e4683a68522a 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -85,7 +85,7 @@ err_unlock:
static int ad7606_read_samples(struct ad7606_state *st)
{
- unsigned int num = st->chip_info->num_channels;
+ unsigned int num = st->chip_info->num_channels - 1;
u16 *data = st->data;
int ret;
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index 5c2b3446fa4a..2c6f60edb7ce 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -89,6 +89,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
unsigned int channel)
{
int ret;
+ int i;
int bits_per_word = ad7949_adc->resolution;
int mask = GENMASK(ad7949_adc->resolution, 0);
struct spi_message msg;
@@ -100,12 +101,23 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
},
};
- ret = ad7949_spi_write_cfg(ad7949_adc,
- channel << AD7949_OFFSET_CHANNEL_SEL,
- AD7949_MASK_CHANNEL_SEL);
- if (ret)
- return ret;
+ /*
+ * 1: write CFG for sample N and read old data (sample N-2)
+ * 2: if CFG was not changed since sample N-1 then we'll get good data
+ * at the next xfer, so we bail out now, otherwise we write something
+ * and we read garbage (sample N-1 configuration).
+ */
+ for (i = 0; i < 2; i++) {
+ ret = ad7949_spi_write_cfg(ad7949_adc,
+ channel << AD7949_OFFSET_CHANNEL_SEL,
+ AD7949_MASK_CHANNEL_SEL);
+ if (ret)
+ return ret;
+ if (channel == ad7949_adc->current_channel)
+ break;
+ }
+ /* 3: write something and read actual data */
ad7949_adc->buffer = 0;
spi_message_init_with_transfers(&msg, tx, 1);
ret = spi_sync(ad7949_adc->spi, &msg);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index 67d096f8180d..c35a1beb817c 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -185,7 +185,7 @@ static int mrfld_adc_probe(struct platform_device *pdev)
int irq;
int ret;
- indio_dev = devm_iio_device_alloc(dev, sizeof(*indio_dev));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(struct mrfld_adc));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index e171db20c04a..02834ca3e1ce 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -478,7 +478,13 @@ static int max1027_probe(struct spi_device *spi)
st->trig->ops = &max1027_trigger_ops;
st->trig->dev.parent = &spi->dev;
iio_trigger_set_drvdata(st->trig, indio_dev);
- iio_trigger_register(st->trig);
+ ret = devm_iio_trigger_register(&indio_dev->dev,
+ st->trig);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Failed to register iio trigger\n");
+ return ret;
+ }
ret = devm_request_threaded_irq(&spi->dev, spi->irq,
iio_trigger_generic_data_rdy_poll,
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index da073d72f649..e480529b3f04 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -89,6 +89,12 @@
#define MAX9611_TEMP_SCALE_NUM 1000000
#define MAX9611_TEMP_SCALE_DIV 2083
+/*
+ * Conversion time is 2 ms (typically) at Ta=25 degreeC
+ * No maximum value is known, so play it safe.
+ */
+#define MAX9611_CONV_TIME_US_RANGE 3000, 3300
+
struct max9611_dev {
struct device *dev;
struct i2c_client *i2c_client;
@@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611,
return ret;
}
- /*
- * need a delay here to make register configuration
- * stabilize. 1 msec at least, from empirical testing.
- */
- usleep_range(1000, 2000);
+ /* need a delay here to make register configuration stabilize. */
+
+ usleep_range(MAX9611_CONV_TIME_US_RANGE);
ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
if (ret < 0) {
@@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611)
MAX9611_REG_CTRL2, 0);
return ret;
}
- usleep_range(1000, 2000);
+ usleep_range(MAX9611_CONV_TIME_US_RANGE);
return 0;
}
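
Aside (not part of the patch above): the MAX9611 change replaces two magic sleep bounds with a single MAX9611_CONV_TIME_US_RANGE macro that expands to both arguments of usleep_range() at once, so callers cannot pass mismatched values. A small hosted-C sketch of that "macro expands to an argument list" idiom; sleep_range() below is an invented stand-in for the kernel's usleep_range(), which takes a minimum and maximum delay in microseconds:

#include <stdio.h>

/* one macro carries both bounds of the delay window */
#define CONV_TIME_US_RANGE 3000, 3300

static void sleep_range(unsigned long min_us, unsigned long max_us)
{
	/* stand-in for usleep_range(); just show what it would receive */
	printf("sleep between %lu and %lu us\n", min_us, max_us);
}

int main(void)
{
	sleep_range(CONV_TIME_US_RANGE);   /* expands to sleep_range(3000, 3300) */
	return 0;
}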
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 963ff043eecf..7ecd2ffa3132 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -229,7 +229,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
*val2 = 65536;
return IIO_VAL_FRACTIONAL;
} else {
- *val = 100;
+ *val = 100000;
*val2 = 65536;
return IIO_VAL_FRACTIONAL;
}
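
Aside (not part of the patch above): with IIO_VAL_FRACTIONAL the reported scale is val/val2, so the hunk changes the humidity scale from 100/65536 to 100000/65536. A quick check of what a consumer ends up computing; the raw reading below is a made-up example:

#include <stdio.h>

int main(void)
{
	int val = 100000, val2 = 65536;   /* scale as a fraction, per the patch */
	int raw = 32768;                  /* hypothetical raw sample */

	/* the scaled value is raw * val / val2 */
	double scaled = (double)raw * val / val2;
	printf("scale = %f, scaled value = %f\n", (double)val / val2, scaled);
	return 0;
}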
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 45e77b308238..0686e41bb8a1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -117,6 +117,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
+ .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
},
{
.whoami = INV_MPU6500_WHOAMI_VALUE,
@@ -124,6 +125,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
.whoami = INV_MPU6515_WHOAMI_VALUE,
@@ -131,6 +133,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
.whoami = INV_MPU6000_WHOAMI_VALUE,
@@ -138,6 +141,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
+ .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
},
{
.whoami = INV_MPU9150_WHOAMI_VALUE,
@@ -145,6 +149,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
+ .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
},
{
.whoami = INV_MPU9250_WHOAMI_VALUE,
@@ -152,6 +157,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
.whoami = INV_MPU9255_WHOAMI_VALUE,
@@ -159,6 +165,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
.whoami = INV_ICM20608_WHOAMI_VALUE,
@@ -166,6 +173,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
},
{
.whoami = INV_ICM20602_WHOAMI_VALUE,
@@ -173,6 +181,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_icm20602,
.config = &chip_config_6050,
.fifo_size = 1008,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
},
};
@@ -481,12 +490,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- if (st->chip_type == INV_ICM20602)
- *val2 = INV_ICM20602_TEMP_SCALE;
- else
- *val2 = INV_MPU6050_TEMP_SCALE;
-
+ *val = st->hw->temp.scale / 1000000;
+ *val2 = st->hw->temp.scale % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_MAGN:
return inv_mpu_magn_get_scale(st, chan, val, val2);
@@ -496,11 +501,7 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
- if (st->chip_type == INV_ICM20602)
- *val = INV_ICM20602_TEMP_OFFSET;
- else
- *val = INV_MPU6050_TEMP_OFFSET;
-
+ *val = st->hw->temp.offset;
return IIO_VAL_INT;
default:
return -EINVAL;
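
Aside (not part of the patch above): the temperature rework stores the per-chip scale as a single micro-unit integer and splits it into the integer and fractional parts expected by IIO_VAL_INT_PLUS_MICRO at read time. A minimal sketch of that split, using the MPU6050 value from the header change further down:

#include <stdio.h>

int main(void)
{
	int scale = 2941176;             /* micro-units, e.g. INV_MPU6050_TEMP_SCALE */
	int val   = scale / 1000000;     /* integer part */
	int val2  = scale % 1000000;     /* micro part */

	/* IIO_VAL_INT_PLUS_MICRO: the reported scale reads as val.val2 */
	printf("scale = %d.%06d\n", val, val2);   /* prints 2.941176 */
	return 0;
}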
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f1fb7b6bdab1..b096e010d4ee 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -107,6 +107,7 @@ struct inv_mpu6050_chip_config {
* @reg: register map of the chip.
* @config: configuration of the chip.
* @fifo_size: size of the FIFO in bytes.
+ * @temp: offset and scale to apply to raw temperature.
*/
struct inv_mpu6050_hw {
u8 whoami;
@@ -114,6 +115,10 @@ struct inv_mpu6050_hw {
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
size_t fifo_size;
+ struct {
+ int offset;
+ int scale;
+ } temp;
};
/*
@@ -279,16 +284,19 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_UP_TIME_MIN 5000
#define INV_MPU6050_REG_UP_TIME_MAX 10000
-#define INV_MPU6050_TEMP_OFFSET 12421
-#define INV_MPU6050_TEMP_SCALE 2941
+#define INV_MPU6050_TEMP_OFFSET 12420
+#define INV_MPU6050_TEMP_SCALE 2941176
#define INV_MPU6050_MAX_GYRO_FS_PARAM 3
#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
#define INV_MPU6050_THREE_AXIS 3
#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
-#define INV_ICM20602_TEMP_OFFSET 8170
-#define INV_ICM20602_TEMP_SCALE 3060
+#define INV_MPU6500_TEMP_OFFSET 7011
+#define INV_MPU6500_TEMP_SCALE 2995178
+
+#define INV_ICM20608_TEMP_OFFSET 8170
+#define INV_ICM20608_TEMP_SCALE 3059976
/* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */
#define INV_MPU6050_OUTPUT_DATA_SIZE 32
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index c605b153be41..dc55d7dff3eb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -320,7 +320,6 @@ enum st_lsm6dsx_fifo_mode {
* @odr: Output data rate of the sensor [Hz].
* @watermark: Sensor watermark level.
* @sip: Number of samples in a given pattern.
- * @decimator: FIFO decimation factor.
* @ts_ref: Sensor timestamp reference for hw one.
* @ext_info: Sensor settings if it is connected to i2c controller
*/
@@ -334,7 +333,6 @@ struct st_lsm6dsx_sensor {
u16 watermark;
u8 sip;
- u8 decimator;
s64 ts_ref;
struct {
@@ -351,9 +349,9 @@ struct st_lsm6dsx_sensor {
* @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
* @conf_lock: Mutex to prevent concurrent FIFO configuration update.
* @page_lock: Mutex to prevent concurrent memory page configuration.
- * @fifo_mode: FIFO operating mode supported by the device.
* @suspend_mask: Suspended sensor bitmask.
* @enable_mask: Enabled sensor bitmask.
+ * @fifo_mask: Enabled hw FIFO bitmask.
* @ts_gain: Hw timestamp rate after internal calibration.
* @ts_sip: Total number of timestamp samples in a given pattern.
* @sip: Total number of samples (acc/gyro/ts) in a given pattern.
@@ -373,9 +371,9 @@ struct st_lsm6dsx_hw {
struct mutex conf_lock;
struct mutex page_lock;
- enum st_lsm6dsx_fifo_mode fifo_mode;
u8 suspend_mask;
u8 enable_mask;
+ u8 fifo_mask;
s64 ts_gain;
u8 ts_sip;
u8 sip;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index d416990ae309..cb536b81a1c2 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -78,14 +78,20 @@ struct st_lsm6dsx_decimator_entry st_lsm6dsx_decimator_table[] = {
{ 32, 0x7 },
};
-static int st_lsm6dsx_get_decimator_val(u8 val)
+static int
+st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr)
{
const int max_size = ARRAY_SIZE(st_lsm6dsx_decimator_table);
+ u32 decimator = max_odr / sensor->odr;
int i;
- for (i = 0; i < max_size; i++)
- if (st_lsm6dsx_decimator_table[i].decimator == val)
+ if (decimator > 1)
+ decimator = round_down(decimator, 2);
+
+ for (i = 0; i < max_size; i++) {
+ if (st_lsm6dsx_decimator_table[i].decimator == decimator)
break;
+ }
return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val;
}
@@ -111,6 +117,13 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
}
}
+static u8 st_lsm6dsx_get_sip(struct st_lsm6dsx_sensor *sensor, u32 min_odr)
+{
+ u8 sip = sensor->odr / min_odr;
+
+ return sip > 1 ? round_down(sip, 2) : sip;
+}
+
static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
{
const struct st_lsm6dsx_reg *ts_dec_reg;
@@ -131,12 +144,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
sensor = iio_priv(hw->iio_devs[i]);
/* update fifo decimators and sample in pattern */
if (hw->enable_mask & BIT(sensor->id)) {
- sensor->sip = sensor->odr / min_odr;
- sensor->decimator = max_odr / sensor->odr;
- data = st_lsm6dsx_get_decimator_val(sensor->decimator);
+ sensor->sip = st_lsm6dsx_get_sip(sensor, min_odr);
+ data = st_lsm6dsx_get_decimator_val(sensor, max_odr);
} else {
sensor->sip = 0;
- sensor->decimator = 0;
data = 0;
}
ts_sip = max_t(u16, ts_sip, sensor->sip);
@@ -176,17 +187,10 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode)
{
unsigned int data;
- int err;
data = FIELD_PREP(ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
- err = st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
- ST_LSM6DSX_FIFO_MODE_MASK, data);
- if (err < 0)
- return err;
-
- hw->fifo_mode = fifo_mode;
-
- return 0;
+ return st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_MODE_MASK, data);
}
static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
@@ -608,11 +612,17 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
{
struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 fifo_mask;
int err;
mutex_lock(&hw->conf_lock);
- if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) {
+ if (enable)
+ fifo_mask = hw->fifo_mask | BIT(sensor->id);
+ else
+ fifo_mask = hw->fifo_mask & ~BIT(sensor->id);
+
+ if (hw->fifo_mask) {
err = st_lsm6dsx_flush_fifo(hw);
if (err < 0)
goto out;
@@ -642,15 +652,19 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
if (err < 0)
goto out;
- if (hw->enable_mask) {
+ if (fifo_mask) {
/* reset hw ts counter */
err = st_lsm6dsx_reset_hw_ts(hw);
if (err < 0)
goto out;
err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
+ if (err < 0)
+ goto out;
}
+ hw->fifo_mask = fifo_mask;
+
out:
mutex_unlock(&hw->conf_lock);
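
Aside (not part of the patch above): the decimator rework derives both the FIFO decimation factor and the samples-in-pattern count from ODR ratios and rounds any value above 1 down to an even number with round_down(). A small sketch of that arithmetic; round_down() is reimplemented here for a power-of-two step, and the ODR numbers are made up:

#include <stdio.h>

/* power-of-two round_down, same shape as the kernel helper for step = 2 */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned int max_odr = 416, min_odr = 26, odr = 52;  /* hypothetical Hz */

	unsigned int decimator = max_odr / odr;     /* 8 */
	if (decimator > 1)
		decimator = round_down(decimator, 2);  /* an odd 5 would become 4 */

	unsigned int sip = odr / min_odr;           /* 2 */
	if (sip > 1)
		sip = round_down(sip, 2);

	printf("decimator = %u, samples in pattern = %u\n", decimator, sip);
	return 0;
}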
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 11b2c7bc8041..a7d40c02ce6b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1447,8 +1447,9 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
return st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask, data);
}
-int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
- bool enable)
+static int
+__st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
+ bool enable)
{
struct st_lsm6dsx_hw *hw = sensor->hw;
u32 odr = enable ? sensor->odr : 0;
@@ -1466,6 +1467,26 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
return 0;
}
+static int
+st_lsm6dsx_check_events(struct st_lsm6dsx_sensor *sensor, bool enable)
+{
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (sensor->id == ST_LSM6DSX_ID_GYRO || enable)
+ return 0;
+
+ return hw->enable_event;
+}
+
+int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
+ bool enable)
+{
+ if (st_lsm6dsx_check_events(sensor, enable))
+ return 0;
+
+ return __st_lsm6dsx_sensor_set_enable(sensor, enable);
+}
+
static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
u8 addr, int *val)
{
@@ -1661,7 +1682,7 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
u8 enable_event;
- int err = 0;
+ int err;
if (type != IIO_EV_TYPE_THRESH)
return -EINVAL;
@@ -1689,7 +1710,8 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
return err;
mutex_lock(&hw->conf_lock);
- err = st_lsm6dsx_sensor_set_enable(sensor, state);
+ if (enable_event || !(hw->fifo_mask & BIT(sensor->id)))
+ err = __st_lsm6dsx_sensor_set_enable(sensor, state);
mutex_unlock(&hw->conf_lock);
if (err < 0)
return err;
@@ -2300,7 +2322,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
hw->suspend_mask |= BIT(sensor->id);
}
- if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS)
+ if (hw->fifo_mask)
err = st_lsm6dsx_flush_fifo(hw);
return err;
@@ -2336,7 +2358,7 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
hw->suspend_mask &= ~BIT(sensor->id);
}
- if (hw->enable_mask)
+ if (hw->fifo_mask)
err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
return err;
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index ddf47023364b..d39c0d6b77f1 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -444,8 +444,10 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
else
temp = __convert_to_raw(temp, resolution);
} else {
- of_property_read_u32_index(np, propname, index,
- (u32 *)&temp);
+ u32 t32;
+
+ of_property_read_u32_index(np, propname, index, &t32);
+ temp = t32;
}
for (j = 0; j < n_size; j++)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 25f2b70fd8ef..43a6f07e0afe 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4763,6 +4763,7 @@ err_ib:
err:
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
+ unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
destroy_workqueue(cma_wq);
return ret;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 8434ec082c3a..2257d7f7810f 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -286,6 +286,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
struct rdma_counter *counter;
int ret;
+ if (!qp->res.valid)
+ return 0;
+
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
index f509c478b469..b7cb59844ece 100644
--- a/drivers/infiniband/core/ib_core_uverbs.c
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -238,28 +238,32 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
/**
- * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
+ * in a given range.
*
* @ucontext: associated user context.
* @entry: the entry to insert into the mmap_xa
* @length: length of the address that will be mmapped
+ * @min_pgoff: minimum pgoff to be returned
+ * @max_pgoff: maximum pgoff to be returned
*
* This function should be called by drivers that use the rdma_user_mmap
 * interface for implementing their mmap syscall. A database of mmap offsets is
* handled in the core and helper functions are provided to insert entries
* into the database and extract entries when the user calls mmap with the
- * given offset. The function allocates a unique page offset that should be
- * provided to user, the user will use the offset to retrieve information such
- * as address to be mapped and how.
+ * given offset. The function allocates a unique page offset in a given range
+ * that should be provided to user, the user will use the offset to retrieve
+ * information such as address to be mapped and how.
*
* Return: 0 on success and -ENOMEM on failure
*/
-int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
- struct rdma_user_mmap_entry *entry,
- size_t length)
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 min_pgoff,
+ u32 max_pgoff)
{
struct ib_uverbs_file *ufile = ucontext->ufile;
- XA_STATE(xas, &ucontext->mmap_xa, 0);
+ XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
u32 xa_first, xa_last, npages;
int err;
u32 i;
@@ -285,7 +289,7 @@ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
entry->npages = npages;
while (true) {
/* First find an empty index */
- xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+ xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
if (xas.xa_node == XAS_RESTART)
goto err_unlock;
@@ -332,4 +336,30 @@ err_unlock:
mutex_unlock(&ufile->umap_lock);
return -ENOMEM;
}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for handling user mmapped addresses. The database is handled in
+ * the core and helper functions are provided to insert entries into the
+ * database and extract entries when the user calls mmap with the given offset.
+ * The function allocates a unique page offset that should be provided to user,
+ * the user will use the offset to retrieve information such as address to
+ * be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
+ U32_MAX);
+}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index c9d294caa27a..50c22575aed6 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -145,7 +145,7 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev)
}
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
- FIELD_SIZEOF(typeof(x), fld) <= (sz))
+ sizeof_field(typeof(x), fld) <= (sz))
#define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved))
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 5774dfc22e18..a51525647ac8 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -848,7 +848,7 @@ static const struct rhashtable_params sdma_rht_params = {
.nelem_hint = NR_CPUS_HINT,
.head_offset = offsetof(struct sdma_rht_node, node),
.key_offset = offsetof(struct sdma_rht_node, cpu_id),
- .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
+ .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
.max_size = NR_CPUS,
.min_size = 8,
.automatic_shrinking = true,
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index b0e9bf7cd150..d36e3e14896d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -107,9 +107,9 @@ enum {
HFI1_HAS_GRH = (1 << 0),
};
-#define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh))
+#define LRH_16B_BYTES (sizeof_field(struct hfi1_16b_header, lrh))
#define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32))
-#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh))
+#define LRH_9B_BYTES (sizeof_field(struct ib_header, lrh))
#define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))
/* 24Bits for qpn, upper 8Bits reserved */
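
Aside (not part of the patch above): several hunks in this series (efa, hfi1, opa_vnic) mechanically replace FIELD_SIZEOF() with the renamed sizeof_field() helper; both expand to the size of a single struct member without needing an instance. A standalone sketch of what the macro does; the struct below is invented for illustration:

#include <stdio.h>

/* same shape as the kernel helper: member size via a null pointer deref type */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct pkt_header {            /* hypothetical structure */
	unsigned char  flags;
	unsigned short lrh[4];
	unsigned int   payload_len;
};

int main(void)
{
	printf("lrh is %zu bytes, payload_len is %zu bytes\n",
	       sizeof_field(struct pkt_header, lrh),
	       sizeof_field(struct pkt_header, payload_len));
	return 0;
}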
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0b5dc1d5928f..34055cbab38c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3018,16 +3018,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->ib_active = false;
flush_workqueue(wq);
- mlx4_ib_close_sriov(ibdev);
- mlx4_ib_mad_cleanup(ibdev);
- ib_unregister_device(&ibdev->ib_dev);
- mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
+ mlx4_ib_close_sriov(ibdev);
+ mlx4_ib_mad_cleanup(ibdev);
+ ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
+
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
kfree(ibdev->ib_uc_qpns_bitmap);
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 4937947400cd..4c26492ab8a3 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -157,7 +157,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
return -ENOMEM;
}
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
struct mlx5_core_dev *dev = dm->dev;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
@@ -175,15 +175,13 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
MLX5_SET(dealloc_memic_in, in, memic_size, length);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return;
- if (!err) {
- spin_lock(&dm->lock);
- bitmap_clear(dm->memic_alloc_pages,
- start_page_idx, num_pages);
- spin_unlock(&dm->lock);
- }
-
- return err;
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ start_page_idx, num_pages);
+ spin_unlock(&dm->lock);
}
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 169cab4915e3..945ebce73613 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -46,7 +46,7 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 51100350b688..997cbfe4b90c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2074,6 +2074,24 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
virt_to_page(dev->mdev->clock_info));
}
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+ struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+ struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+ struct mlx5_ib_dm *mdm;
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
+ mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
+ mdm->size);
+ kfree(mdm);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
@@ -2186,26 +2204,55 @@ free_bfreg:
return err;
}
-static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+ struct mlx5_ib_dm *mdm,
+ u64 address)
+{
+ mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
+ mdm->mentry.address = address;
+ return rdma_user_mmap_entry_insert_range(
+ context, &mdm->mentry.rdma_entry,
+ mdm->size,
+ MLX5_IB_MMAP_DEVICE_MEM << 16,
+ (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
+{
+ unsigned long idx;
+ u8 command;
+
+ command = get_command(vma->vm_pgoff);
+ idx = get_extended_index(vma->vm_pgoff);
+
+ return (command << 16 | idx);
+}
+
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct ib_ucontext *ucontext)
{
- struct mlx5_ib_ucontext *mctx = to_mucontext(context);
- struct mlx5_ib_dev *dev = to_mdev(context->device);
- u16 page_idx = get_extended_index(vma->vm_pgoff);
- size_t map_size = vma->vm_end - vma->vm_start;
- u32 npages = map_size >> PAGE_SHIFT;
+ struct mlx5_user_mmap_entry *mentry;
+ struct rdma_user_mmap_entry *entry;
+ unsigned long pgoff;
+ pgprot_t prot;
phys_addr_t pfn;
+ int ret;
- if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
- page_idx + npages)
+ pgoff = mlx5_vma_to_pgoff(vma);
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+ if (!entry)
return -EINVAL;
- pfn = ((dev->mdev->bar_addr +
- MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
- PAGE_SHIFT) +
- page_idx;
- return rdma_user_mmap_io(context, vma, pfn, map_size,
- pgprot_writecombine(vma->vm_page_prot),
- NULL);
+ mentry = to_mmmap(entry);
+ pfn = (mentry->address >> PAGE_SHIFT);
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ ret = rdma_user_mmap_io(ucontext, vma, pfn,
+ entry->npages * PAGE_SIZE,
+ prot,
+ entry);
+ rdma_user_mmap_entry_put(&mentry->rdma_entry);
+ return ret;
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,11 +2295,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
- case MLX5_IB_MMAP_DEVICE_MEM:
- return dm_mmap(ibcontext, vma);
-
default:
- return -EINVAL;
+ return mlx5_ib_mmap_offset(dev, vma, ibcontext);
}
return 0;
@@ -2288,8 +2332,9 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
{
struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
u64 start_offset;
- u32 page_idx;
+ u16 page_idx;
int err;
+ u64 address;
dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
@@ -2298,28 +2343,30 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
if (err)
return err;
- page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
- PAGE_SHIFT;
+ address = dm->dev_addr & PAGE_MASK;
+ err = add_dm_mmap_entry(ctx, dm, address);
+ if (err)
+ goto err_dealloc;
+ page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
err = uverbs_copy_to(attrs,
MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx, sizeof(page_idx));
+ &page_idx,
+ sizeof(page_idx));
if (err)
- goto err_dealloc;
+ goto err_copy;
start_offset = dm->dev_addr & ~PAGE_MASK;
err = uverbs_copy_to(attrs,
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
&start_offset, sizeof(start_offset));
if (err)
- goto err_dealloc;
-
- bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
- DIV_ROUND_UP(dm->size, PAGE_SIZE));
+ goto err_copy;
return 0;
+err_copy:
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
err_dealloc:
mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
@@ -2423,23 +2470,13 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
- struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
struct mlx5_ib_dm *dm = to_mdm(ibdm);
- u32 page_idx;
int ret;
switch (dm->type) {
case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
- if (ret)
- return ret;
-
- page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
- PAGE_SHIFT;
- bitmap_clear(ctx->dm_pages, page_idx,
- DIV_ROUND_UP(dm->size, PAGE_SIZE));
- break;
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+ return 0;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
dm->size, ctx->devx_uid, dm->dev_addr,
@@ -3544,10 +3581,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
INIT_LIST_HEAD(&handler->list);
- if (dst) {
- memcpy(&dest_arr[0], dst, sizeof(*dst));
- dest_num++;
- }
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
err = parse_flow_attr(dev->mdev, spec,
@@ -3560,6 +3593,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
+ if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+ memcpy(&dest_arr[0], dst, sizeof(*dst));
+ dest_num++;
+ }
+
if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);
@@ -3600,10 +3638,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+ if (!dest_num)
rule_dst = NULL;
- dest_num = 0;
- }
} else {
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -6236,6 +6272,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.map_mr_sg = mlx5_ib_map_mr_sg,
.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
.mmap = mlx5_ib_mmap,
+ .mmap_free = mlx5_ib_mmap_free,
.modify_cq = mlx5_ib_modify_cq,
.modify_device = mlx5_ib_modify_device,
.modify_port = mlx5_ib_modify_port,
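
Aside (not part of the patch above): the mlx5 MEMIC rework keys mmap entries by a pgoff built from a 16-bit command in the high bits and a 16-bit index in the low bits, and reserves the range starting at MLX5_IB_MMAP_DEVICE_MEM << 16 for device memory. A tiny sketch of that encode/decode; the command value below is purely illustrative:

#include <stdio.h>

/* pack a 16-bit command and a 16-bit index into one page offset */
static unsigned long make_pgoff(unsigned int command, unsigned int idx)
{
	return ((unsigned long)command << 16) | (idx & 0xffff);
}

int main(void)
{
	unsigned int command = 7;            /* illustrative command value */
	unsigned long pgoff = make_pgoff(command, 0x0042);

	/* the reverse direction, as mlx5_vma_to_pgoff() reassembles it */
	printf("pgoff = 0x%lx -> command %lu, index 0x%lx\n",
	       pgoff, pgoff >> 16, pgoff & 0xffff);
	return 0;
}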
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 5986953ec2fa..b06f32ff5748 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -118,6 +118,10 @@ enum {
MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};
+enum mlx5_ib_mmap_type {
+ MLX5_IB_MMAP_TYPE_MEMIC = 1,
+};
+
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
(MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -135,7 +139,6 @@ struct mlx5_ib_ucontext {
u32 tdn;
u64 lib_caps;
- DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
u16 devx_uid;
/* For RoCE LAG TX affinity */
atomic_t tx_port_affinity;
@@ -556,6 +559,12 @@ enum mlx5_ib_mtt_access_flags {
MLX5_IB_MTT_WRITE = (1 << 1),
};
+struct mlx5_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u8 mmap_flag;
+ u64 address;
+};
+
struct mlx5_ib_dm {
struct ib_dm ibdm;
phys_addr_t dev_addr;
@@ -567,6 +576,7 @@ struct mlx5_ib_dm {
} icm_dm;
/* other dm types specific params should be added here */
};
+ struct mlx5_user_mmap_entry mentry;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -1101,6 +1111,13 @@ to_mflow_act(struct ib_flow_action *ibact)
return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry,
+ struct mlx5_user_mmap_entry, rdma_entry);
+}
+
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct mlx5_db *db);
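The new to_mmmap() helper added above is the usual container_of() idiom: the driver embeds the generic rdma_user_mmap_entry inside its own mlx5_user_mmap_entry and recovers the wrapper from a pointer to the member. A self-contained sketch of the pattern, using a local container_of definition and simplified field types instead of the kernel headers:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic entry as the core would see it (simplified). */
struct rdma_user_mmap_entry {
	unsigned long start_pgoff;
};

/* Driver-private wrapper embedding the generic entry. */
struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	unsigned char mmap_flag;
	unsigned long long address;
};

static struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct mlx5_user_mmap_entry, rdma_entry);
}

int main(void)
{
	struct mlx5_user_mmap_entry e = {
		.rdma_entry = { .start_pgoff = 42 },
		.mmap_flag = 1,
		.address = 0xdead0000ULL,
	};
	struct rdma_user_mmap_entry *core = &e.rdma_entry;

	/* Recover the wrapper from the embedded member. */
	printf("flag=%u addr=%#llx\n",
	       (unsigned)to_mmmap(core)->mmap_flag, to_mmmap(core)->address);
	return 0;
}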
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index f9a492ed900b..831ad578a7b2 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb)
calc_icrc = rxe_icrc_hdr(pkt, skb);
calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
- payload_size(pkt));
+ payload_size(pkt) + bth_pad(pkt));
calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index c5d9b558fa90..e5031172c019 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (err)
return err;
}
+ if (bth_pad(pkt)) {
+ u8 *pad = payload_addr(pkt) + paylen;
+
+ memset(pad, 0, bth_pad(pkt));
+ crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
+ }
}
p = payload_addr(pkt) + paylen + bth_pad(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 1cbfbd98eb22..c4a8195bf670 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (err)
pr_err("Failed copying memory\n");
+ if (bth_pad(&ack_pkt)) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u8 *pad = payload_addr(&ack_pkt) + payload;
+
+ memset(pad, 0, bth_pad(&ack_pkt));
+ icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
+ }
p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
*p = ~icrc;
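The three rxe hunks above make the same change: when the payload length is not a multiple of four, RoCE packets carry pad bytes, and the ICRC must cover payload plus pad, with the pad zeroed first. A user-space sketch of the idea, using a toy CRC32 in place of rxe_crc32() (the real ICRC seeding and header masking are omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy CRC32 (reflected, poly 0xEDB88320) standing in for rxe_crc32(). */
static uint32_t crc32_update(uint32_t crc, const uint8_t *buf, size_t len)
{
	crc = ~crc;
	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0 : 0xEDB88320u) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return ~crc;
}

int main(void)
{
	uint8_t pkt[16] = { 1, 2, 3, 4, 5 };	/* 5 payload bytes */
	size_t payload = 5;
	size_t pad = (4 - (payload & 3)) & 3;	/* 3 pad bytes to reach a 4-byte boundary */

	/* Zero the pad bytes and include them in the checksum, as the
	 * rxe_req/rxe_resp/rxe_rcv changes now do for the ICRC. */
	memset(pkt + payload, 0, pad);
	uint32_t icrc = crc32_update(0, pkt, payload + pad);

	printf("payload=%zu pad=%zu icrc=%#x\n", payload, pad, (unsigned)icrc);
	return 0;
}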
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index 62390e9e0023..8ad7da989a0e 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -63,7 +63,7 @@ struct vnic_stats {
};
};
-#define VNIC_STAT(m) { FIELD_SIZEOF(struct opa_vnic_stats, m), \
+#define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \
offsetof(struct opa_vnic_stats, m) }
static struct vnic_stats vnic_gstrings_stats[] = {
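The FIELD_SIZEOF -> sizeof_field rename that starts here, and repeats throughout the ethtool/stats tables below, is mechanical: both macros yield the size of a struct member without needing an instance of the struct. A stand-alone approximation, assuming only standard C (struct opa_vnic_stats_example is an invented placeholder):

#include <stdio.h>

/* Size of a struct member without an object of that type; this mirrors the
 * kernel's sizeof_field(TYPE, MEMBER), formerly FIELD_SIZEOF. The null
 * pointer is never dereferenced because sizeof does not evaluate its operand. */
#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

struct opa_vnic_stats_example {
	unsigned long long rx_bytes;
	unsigned int rx_drops;
	char name[16];
};

int main(void)
{
	printf("rx_bytes: %zu bytes\n",
	       sizeof_field(struct opa_vnic_stats_example, rx_bytes));
	printf("rx_drops: %zu bytes\n",
	       sizeof_field(struct opa_vnic_stats_example, rx_drops));
	printf("name:     %zu bytes\n",
	       sizeof_field(struct opa_vnic_stats_example, name));
	return 0;
}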
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index c49afbea3458..2f9304d1db49 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -6,13 +6,13 @@ config INTERCONNECT_QCOM
Support for Qualcomm's Network-on-Chip interconnect hardware.
config INTERCONNECT_QCOM_MSM8974
- tristate "Qualcomm MSM8974 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on QCOM_SMD_RPM
- select INTERCONNECT_QCOM_SMD_RPM
- help
- This is a driver for the Qualcomm Network-on-Chip on msm8974-based
- platforms.
+ tristate "Qualcomm MSM8974 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+ platforms.
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index ce599a0c83d9..bf8bd1aee358 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -652,7 +652,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct icc_node *node;
+ struct icc_node *node, *tmp;
size_t num_nodes, i;
int ret;
@@ -732,7 +732,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
return 0;
err_del_icc:
- list_for_each_entry(node, &provider->nodes, node_list) {
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
icc_node_del(node);
icc_node_destroy(node->id);
}
@@ -748,9 +748,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
struct icc_provider *provider = &qp->provider;
- struct icc_node *n;
+ struct icc_node *n, *tmp;
- list_for_each_entry(n, &provider->nodes, node_list) {
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
icc_node_del(n);
icc_node_destroy(n->id);
}
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index b4966d8f3348..8e0735a87040 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -414,7 +414,7 @@ static int qnoc_probe(struct platform_device *pdev)
struct icc_provider *provider;
struct qcom_icc_node **qnodes;
struct qcom_icc_provider *qp;
- struct icc_node *node;
+ struct icc_node *node, *tmp;
size_t num_nodes, i;
int ret;
@@ -494,7 +494,7 @@ static int qnoc_probe(struct platform_device *pdev)
return 0;
err:
- list_for_each_entry(node, &provider->nodes, node_list) {
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
icc_node_del(node);
icc_node_destroy(node->id);
}
@@ -508,9 +508,9 @@ static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
struct icc_provider *provider = &qp->provider;
- struct icc_node *n;
+ struct icc_node *n, *tmp;
- list_for_each_entry(n, &provider->nodes, node_list) {
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
icc_node_del(n);
icc_node_destroy(n->id);
}
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 502a6c22b41e..387267ee9648 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -868,9 +868,9 @@ static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
struct icc_provider *provider = &qp->provider;
- struct icc_node *n;
+ struct icc_node *n, *tmp;
- list_for_each_entry(n, &provider->nodes, node_list) {
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
icc_node_del(n);
icc_node_destroy(n->id);
}
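Each interconnect hunk above switches list_for_each_entry() to list_for_each_entry_safe() because icc_node_del()/icc_node_destroy() remove the entry being visited; the _safe variant caches the next element before the loop body runs. A small sketch with a cut-down list implementation showing why the non-safe loop would touch freed memory (the macros mirror <linux/list.h> and rely on the __typeof__ extension, as the kernel does):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, __typeof__(*(pos)), member)
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_first_entry(head, __typeof__(*pos), member),	\
	     n = list_next_entry(pos, member);				\
	     &pos->member != (head);					\
	     pos = n, n = list_next_entry(n, member))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct node { int id; struct list_head node_list; };

int main(void)
{
	struct list_head nodes;
	struct node *n, *tmp;

	INIT_LIST_HEAD(&nodes);
	for (int i = 0; i < 3; i++) {
		struct node *new = malloc(sizeof(*new));
		new->id = i;
		list_add_tail(&new->node_list, &nodes);
	}

	/* The next entry is remembered in tmp before the current one is freed,
	 * so deleting while iterating is safe; the non-safe loop would read
	 * n->node_list.next after free(n). */
	list_for_each_entry_safe(n, tmp, &nodes, node_list) {
		printf("destroying node %d\n", n->id);
		list_del(&n->node_list);
		free(n);
	}
	return 0;
}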
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 08c552e5e41b..c05b12110456 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -67,23 +67,34 @@ struct superblock_disk {
* To save constantly doing look ups on disk we keep an in core copy of the
* on-disk bitmap, the region_map.
*
- * To further reduce metadata I/O overhead we use a second bitmap, the dmap
- * (dirty bitmap), which tracks the dirty words, i.e. longs, of the region_map.
+ * In order to track which regions are hydrated during a metadata transaction,
+ * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two
+ * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap
+ * tracks the regions that got hydrated during the current metadata
+ * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of
+ * the dirty_regions bitmap.
+ *
+ * This allows us to precisely track the regions that were hydrated during the
+ * current metadata transaction and update the metadata accordingly, when we
+ * commit the current transaction. This is important because dm-clone should
+ * only commit the metadata of regions that were properly flushed to the
+ * destination device beforehand. Otherwise, in case of a crash, we could end
+ * up with a corrupted dm-clone device.
*
* When a region finishes hydrating dm-clone calls
* dm_clone_set_region_hydrated(), or for discard requests
* dm_clone_cond_set_range(), which sets the corresponding bits in region_map
* and dmap.
*
- * During a metadata commit we scan the dmap for dirty region_map words (longs)
- * and update accordingly the on-disk metadata. Thus, we don't have to flush to
- * disk the whole region_map. We can just flush the dirty region_map words.
+ * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions
+ * and update the on-disk metadata accordingly. Thus, we don't have to flush to
+ * disk the whole region_map. We can just flush the dirty region_map bits.
*
- * We use a dirty bitmap, which is smaller than the original region_map, to
- * reduce the amount of memory accesses during a metadata commit. As dm-bitset
- * accesses the on-disk bitmap in 64-bit word granularity, there is no
- * significant benefit in tracking the dirty region_map bits with a smaller
- * granularity.
+ * We use the helper dmap->dirty_words bitmap, which is smaller than the
+ * original region_map, to reduce the amount of memory accesses during a
+ * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in
+ * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk
+ * accesses.
*
* We could update directly the on-disk bitmap, when dm-clone calls either
* dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but this
@@ -92,12 +103,13 @@ struct superblock_disk {
* e.g., in a hooked overwrite bio's completion routine, and further reduce the
* I/O completion latency.
*
- * We maintain two dirty bitmaps. During a metadata commit we atomically swap
- * the currently used dmap with the unused one. This allows the metadata update
- * functions to run concurrently with an ongoing commit.
+ * We maintain two dirty bitmap sets. During a metadata commit we atomically
+ * swap the currently used dmap with the unused one. This allows the metadata
+ * update functions to run concurrently with an ongoing commit.
*/
struct dirty_map {
unsigned long *dirty_words;
+ unsigned long *dirty_regions;
unsigned int changed;
};
@@ -115,6 +127,9 @@ struct dm_clone_metadata {
struct dirty_map dmap[2];
struct dirty_map *current_dmap;
+ /* Protected by lock */
+ struct dirty_map *committing_dmap;
+
/*
* In core copy of the on-disk bitmap to save constantly doing look ups
* on disk.
@@ -461,34 +476,53 @@ static size_t bitmap_size(unsigned long nr_bits)
return BITS_TO_LONGS(nr_bits) * sizeof(long);
}
-static int dirty_map_init(struct dm_clone_metadata *cmd)
+static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
+ unsigned long nr_regions)
{
- cmd->dmap[0].changed = 0;
- cmd->dmap[0].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
+ dmap->changed = 0;
- if (!cmd->dmap[0].dirty_words) {
- DMERR("Failed to allocate dirty bitmap");
+ dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL);
+ if (!dmap->dirty_words)
+ return -ENOMEM;
+
+ dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL);
+ if (!dmap->dirty_regions) {
+ kvfree(dmap->dirty_words);
return -ENOMEM;
}
- cmd->dmap[1].changed = 0;
- cmd->dmap[1].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
+ return 0;
+}
+
+static void __dirty_map_exit(struct dirty_map *dmap)
+{
+ kvfree(dmap->dirty_words);
+ kvfree(dmap->dirty_regions);
+}
+
+static int dirty_map_init(struct dm_clone_metadata *cmd)
+{
+ if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) {
+ DMERR("Failed to allocate dirty bitmap");
+ return -ENOMEM;
+ }
- if (!cmd->dmap[1].dirty_words) {
+ if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) {
DMERR("Failed to allocate dirty bitmap");
- kvfree(cmd->dmap[0].dirty_words);
+ __dirty_map_exit(&cmd->dmap[0]);
return -ENOMEM;
}
cmd->current_dmap = &cmd->dmap[0];
+ cmd->committing_dmap = NULL;
return 0;
}
static void dirty_map_exit(struct dm_clone_metadata *cmd)
{
- kvfree(cmd->dmap[0].dirty_words);
- kvfree(cmd->dmap[1].dirty_words);
+ __dirty_map_exit(&cmd->dmap[0]);
+ __dirty_map_exit(&cmd->dmap[1]);
}
static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
@@ -633,21 +667,23 @@ unsigned long dm_clone_find_next_unhydrated_region(struct dm_clone_metadata *cmd
return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start);
}
-static int __update_metadata_word(struct dm_clone_metadata *cmd, unsigned long word)
+static int __update_metadata_word(struct dm_clone_metadata *cmd,
+ unsigned long *dirty_regions,
+ unsigned long word)
{
int r;
unsigned long index = word * BITS_PER_LONG;
unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG);
while (index < max_index) {
- if (test_bit(index, cmd->region_map)) {
+ if (test_bit(index, dirty_regions)) {
r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root,
index, &cmd->bitset_root);
-
if (r) {
DMERR("dm_bitset_set_bit failed");
return r;
}
+ __clear_bit(index, dirty_regions);
}
index++;
}
@@ -721,7 +757,7 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
if (word == cmd->nr_words)
break;
- r = __update_metadata_word(cmd, word);
+ r = __update_metadata_word(cmd, dmap->dirty_regions, word);
if (r)
return r;
@@ -743,15 +779,17 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
return 0;
}
-int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
+int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
{
- int r = -EPERM;
+ int r = 0;
struct dirty_map *dmap, *next_dmap;
down_write(&cmd->lock);
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ r = -EPERM;
goto out;
+ }
/* Get current dirty bitmap */
dmap = cmd->current_dmap;
@@ -763,7 +801,7 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
* The last commit failed, so we don't have a clean dirty-bitmap to
* use.
*/
- if (WARN_ON(next_dmap->changed)) {
+ if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) {
r = -EINVAL;
goto out;
}
@@ -773,11 +811,33 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
cmd->current_dmap = next_dmap;
spin_unlock_irq(&cmd->bitmap_lock);
- /*
- * No one is accessing the old dirty bitmap anymore, so we can flush
- * it.
- */
- r = __flush_dmap(cmd, dmap);
+ /* Set old dirty bitmap as currently committing */
+ cmd->committing_dmap = dmap;
+out:
+ up_write(&cmd->lock);
+
+ return r;
+}
+
+int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
+{
+ int r = -EPERM;
+
+ down_write(&cmd->lock);
+
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
+ goto out;
+
+ if (WARN_ON(!cmd->committing_dmap)) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = __flush_dmap(cmd, cmd->committing_dmap);
+ if (!r) {
+ /* Clear committing dmap */
+ cmd->committing_dmap = NULL;
+ }
out:
up_write(&cmd->lock);
@@ -802,6 +862,7 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
dmap = cmd->current_dmap;
__set_bit(word, dmap->dirty_words);
+ __set_bit(region_nr, dmap->dirty_regions);
__set_bit(region_nr, cmd->region_map);
dmap->changed = 1;
@@ -830,6 +891,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
if (!test_bit(region_nr, cmd->region_map)) {
word = region_nr / BITS_PER_LONG;
__set_bit(word, dmap->dirty_words);
+ __set_bit(region_nr, dmap->dirty_regions);
__set_bit(region_nr, cmd->region_map);
dmap->changed = 1;
}
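Putting the rewritten comment at the top of this file into code: dirty_regions records which regions were hydrated in the current transaction, and dirty_words records which longs of that bitmap contain any set bit, so a commit only walks dirty words. A simplified user-space sketch of the two bitmaps and the commit scan, with the kernel bitmap helpers replaced by local ones:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define NR_REGIONS	200
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long dirty_regions[BITS_TO_LONGS(NR_REGIONS)];
static unsigned long dirty_words[BITS_TO_LONGS(BITS_TO_LONGS(NR_REGIONS))];

static void set_bit_ul(unsigned long *map, unsigned long nr)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(const unsigned long *map, unsigned long nr)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Mirrors dm_clone_set_region_hydrated(): mark the region dirty and mark
 * the word of dirty_regions that contains it. */
static void region_hydrated(unsigned long region_nr)
{
	set_bit_ul(dirty_regions, region_nr);
	set_bit_ul(dirty_words, region_nr / BITS_PER_LONG);
}

/* Mirrors the __flush_dmap()/__update_metadata_word() scan: only dirty words
 * are visited, and only regions set in dirty_regions are "written out". */
static void commit(void)
{
	for (unsigned long w = 0; w < BITS_TO_LONGS(NR_REGIONS); w++) {
		if (!test_bit_ul(dirty_words, w))
			continue;
		unsigned long first = w * BITS_PER_LONG;
		unsigned long last = first + BITS_PER_LONG;
		if (last > NR_REGIONS)
			last = NR_REGIONS;
		for (unsigned long r = first; r < last; r++)
			if (test_bit_ul(dirty_regions, r))
				printf("flush region %lu (word %lu)\n", r, w);
	}
	memset(dirty_regions, 0, sizeof(dirty_regions));
	memset(dirty_words, 0, sizeof(dirty_words));
}

int main(void)
{
	region_hydrated(3);
	region_hydrated(70);
	region_hydrated(199);
	commit();
	return 0;
}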
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 3fe50a781c11..14af1ebd853f 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -75,7 +75,23 @@ void dm_clone_metadata_close(struct dm_clone_metadata *cmd);
/*
* Commit dm-clone metadata to disk.
+ *
+ * We use a two phase commit:
+ *
+ * 1. dm_clone_metadata_pre_commit(): Prepare the current transaction for
+ * committing. After this is called, all subsequent metadata updates, done
+ * through either dm_clone_set_region_hydrated() or
+ * dm_clone_cond_set_range(), will be part of the **next** transaction.
+ *
+ * 2. dm_clone_metadata_commit(): Actually commit the current transaction to
+ * disk and start a new transaction.
+ *
+ * This allows dm-clone to flush the destination device after step (1) to
+ * ensure that all freshly hydrated regions, for which we are updating the
+ * metadata, are properly written to non-volatile storage and won't be lost in
+ * case of a crash.
*/
+int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd);
int dm_clone_metadata_commit(struct dm_clone_metadata *cmd);
/*
@@ -112,6 +128,7 @@ int dm_clone_metadata_abort(struct dm_clone_metadata *cmd);
* Switches metadata to a read only mode. Once read-only mode has been entered
* the following functions will return -EPERM:
*
+ * dm_clone_metadata_pre_commit()
* dm_clone_metadata_commit()
* dm_clone_set_region_hydrated()
* dm_clone_cond_set_range()
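The two-phase contract above can be illustrated with a double-buffered dirty map, mirroring how dm_clone_metadata_pre_commit() swaps current_dmap and parks the old one in committing_dmap; updates made between the two calls belong to the next transaction. A rough sketch with simplified names and no locking:

#include <stdio.h>

struct dirty_map { int changed; char tag; };

static struct dirty_map dmap[2] = { { 0, 'A' }, { 0, 'B' } };
static struct dirty_map *current_dmap = &dmap[0];
static struct dirty_map *committing_dmap;

static void set_region_hydrated(int region)
{
	current_dmap->changed = 1;
	printf("region %d recorded in map %c\n", region, current_dmap->tag);
}

static int pre_commit(void)
{
	struct dirty_map *next = (current_dmap == &dmap[0]) ? &dmap[1] : &dmap[0];

	if (next->changed || committing_dmap)
		return -1;		/* previous commit did not finish */

	committing_dmap = current_dmap;
	current_dmap = next;		/* later updates go to the next transaction */
	return 0;
}

static int commit(void)
{
	if (!committing_dmap)
		return -1;		/* pre_commit() was not called first */
	printf("flushing map %c to disk\n", committing_dmap->tag);
	committing_dmap->changed = 0;
	committing_dmap = NULL;
	return 0;
}

int main(void)
{
	set_region_hydrated(10);
	pre_commit();			/* step 1: freeze the current transaction */
	set_region_hydrated(11);	/* belongs to the *next* transaction */
	/* step 1.5, done by the target: flush the destination device here */
	commit();			/* step 2: write the frozen transaction */
	return 0;
}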
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index b3d89072d21c..d1e1b5b56b1b 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -86,6 +86,12 @@ struct clone {
struct dm_clone_metadata *cmd;
+ /*
+ * bio used to flush the destination device, before committing the
+ * metadata.
+ */
+ struct bio flush_bio;
+
/* Region hydration hash table */
struct hash_table_bucket *ht;
@@ -1108,10 +1114,13 @@ static bool need_commit_due_to_time(struct clone *clone)
/*
* A non-zero return indicates read-only or fail mode.
*/
-static int commit_metadata(struct clone *clone)
+static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
int r = 0;
+ if (dest_dev_flushed)
+ *dest_dev_flushed = false;
+
mutex_lock(&clone->commit_lock);
if (!dm_clone_changed_this_transaction(clone->cmd))
@@ -1122,8 +1131,26 @@ static int commit_metadata(struct clone *clone)
goto out;
}
- r = dm_clone_metadata_commit(clone->cmd);
+ r = dm_clone_metadata_pre_commit(clone->cmd);
+ if (unlikely(r)) {
+ __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
+ goto out;
+ }
+ bio_reset(&clone->flush_bio);
+ bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
+ clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ r = submit_bio_wait(&clone->flush_bio);
+ if (unlikely(r)) {
+ __metadata_operation_failed(clone, "flush destination device", r);
+ goto out;
+ }
+
+ if (dest_dev_flushed)
+ *dest_dev_flushed = true;
+
+ r = dm_clone_metadata_commit(clone->cmd);
if (unlikely(r)) {
__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
goto out;
@@ -1194,6 +1221,7 @@ static void process_deferred_bios(struct clone *clone)
static void process_deferred_flush_bios(struct clone *clone)
{
struct bio *bio;
+ bool dest_dev_flushed;
struct bio_list bios = BIO_EMPTY_LIST;
struct bio_list bio_completions = BIO_EMPTY_LIST;
@@ -1213,7 +1241,7 @@ static void process_deferred_flush_bios(struct clone *clone)
!(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
return;
- if (commit_metadata(clone)) {
+ if (commit_metadata(clone, &dest_dev_flushed)) {
bio_list_merge(&bios, &bio_completions);
while ((bio = bio_list_pop(&bios)))
@@ -1227,8 +1255,17 @@ static void process_deferred_flush_bios(struct clone *clone)
while ((bio = bio_list_pop(&bio_completions)))
bio_endio(bio);
- while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
+ while ((bio = bio_list_pop(&bios))) {
+ if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
+ /* We just flushed the destination device as part of
+ * the metadata commit, so there is no reason to send
+ * another flush.
+ */
+ bio_endio(bio);
+ } else {
+ generic_make_request(bio);
+ }
+ }
}
static void do_worker(struct work_struct *work)
@@ -1400,7 +1437,7 @@ static void clone_status(struct dm_target *ti, status_type_t type,
/* Commit to ensure statistics aren't out-of-date */
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
- (void) commit_metadata(clone);
+ (void) commit_metadata(clone, NULL);
r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);
@@ -1834,6 +1871,7 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&clone->deferred_flush_completions);
clone->hydration_offset = 0;
atomic_set(&clone->hydrations_in_flight, 0);
+ bio_init(&clone->flush_bio, NULL, 0);
clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
if (!clone->wq) {
@@ -1907,6 +1945,7 @@ static void clone_dtr(struct dm_target *ti)
struct clone *clone = ti->private;
mutex_destroy(&clone->commit_lock);
+ bio_uninit(&clone->flush_bio);
for (i = 0; i < clone->nr_ctr_args; i++)
kfree(clone->ctr_args[i]);
@@ -1961,7 +2000,7 @@ static void clone_postsuspend(struct dm_target *ti)
wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
flush_workqueue(clone->wq);
- (void) commit_metadata(clone);
+ (void) commit_metadata(clone, NULL);
}
static void clone_resume(struct dm_target *ti)
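From the target's side, the reworked commit_metadata() above becomes a three-step sequence: prepare the transaction, flush the destination device, then commit, reporting through dest_dev_flushed whether deferred REQ_PREFLUSH bios can be completed without resubmission. A hedged outline with stub functions in place of the dm-clone calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for dm_clone_metadata_pre_commit(), submit_bio_wait() on
 * the flush bio, and dm_clone_metadata_commit(); only the ordering and the
 * dest_dev_flushed outcome are the point here. */
static int metadata_pre_commit(void) { puts("pre-commit metadata"); return 0; }
static int flush_destination(void)   { puts("flush destination device"); return 0; }
static int metadata_commit(void)     { puts("commit metadata"); return 0; }

static int commit_metadata(bool *dest_dev_flushed)
{
	int r;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	r = metadata_pre_commit();
	if (r)
		return r;

	r = flush_destination();
	if (r)
		return r;

	if (dest_dev_flushed)
		*dest_dev_flushed = true;	/* callers may skip redundant flushes */

	return metadata_commit();
}

int main(void)
{
	bool flushed;

	if (!commit_metadata(&flushed) && flushed)
		puts("deferred flush bios can be completed without resubmitting them");
	return 0;
}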
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index dbcc1e41cd57..e0c32793c248 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -599,45 +599,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
return pgpath;
}
-static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
-{
- struct pgpath *pgpath;
- unsigned long flags;
-
- /* Do we need to select a new pgpath? */
- /*
- * FIXME: currently only switching path if no path (due to failure, etc)
- * - which negates the point of using a path selector
- */
- pgpath = READ_ONCE(m->current_pgpath);
- if (!pgpath)
- pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
-
- if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- /* Queue for the daemon to resubmit */
- spin_lock_irqsave(&m->lock, flags);
- bio_list_add(&m->queued_bios, bio);
- spin_unlock_irqrestore(&m->lock, flags);
- queue_work(kmultipathd, &m->process_queued_bios);
-
- return ERR_PTR(-EAGAIN);
- }
- return NULL;
- }
-
- return pgpath;
-}
-
static int __multipath_map_bio(struct multipath *m, struct bio *bio,
struct dm_mpath_io *mpio)
{
- struct pgpath *pgpath;
-
- if (!m->hw_handler_name)
- pgpath = __map_bio_fast(m, bio);
- else
- pgpath = __map_bio(m, bio);
+ struct pgpath *pgpath = __map_bio(m, bio);
if (IS_ERR(pgpath))
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 4c68a7b93d5e..b88d6d701f5b 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,15 @@ struct dm_pool_metadata {
sector_t data_block_size;
/*
+ * Pre-commit callback.
+ *
+ * This allows the thin provisioning target to run a callback before
+ * the metadata are committed.
+ */
+ dm_pool_pre_commit_fn pre_commit_fn;
+ void *pre_commit_context;
+
+ /*
* We reserve a section of the metadata for commit overhead.
* All reported space does *not* include this.
*/
@@ -826,6 +835,14 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (unlikely(!pmd->in_service))
return 0;
+ if (pmd->pre_commit_fn) {
+ r = pmd->pre_commit_fn(pmd->pre_commit_context);
+ if (r < 0) {
+ DMERR("pre-commit callback failed");
+ return r;
+ }
+ }
+
r = __write_changed_details(pmd);
if (r < 0)
return r;
@@ -892,6 +909,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
pmd->in_service = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
+ pmd->pre_commit_fn = NULL;
+ pmd->pre_commit_context = NULL;
r = __create_persistent_data_objects(pmd, format_device);
if (r) {
@@ -2044,6 +2063,16 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
return r;
}
+void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
+ dm_pool_pre_commit_fn fn,
+ void *context)
+{
+ pmd_write_lock_in_core(pmd);
+ pmd->pre_commit_fn = fn;
+ pmd->pre_commit_context = context;
+ pmd_write_unlock(pmd);
+}
+
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index f6be0d733c20..7ef56bd2a7e3 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
*/
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
+/* Pre-commit callback */
+typedef int (*dm_pool_pre_commit_fn)(void *context);
+
+void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
+ dm_pool_pre_commit_fn fn,
+ void *context);
+
/*----------------------------------------------------------------*/
#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5a2c494cb552..57626c27a54b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -328,6 +328,7 @@ struct pool_c {
dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
+ struct bio flush_bio;
};
/*
@@ -2383,8 +2384,16 @@ static void process_deferred_bios(struct pool *pool)
while ((bio = bio_list_pop(&bio_completions)))
bio_endio(bio);
- while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
+ while ((bio = bio_list_pop(&bios))) {
+ /*
+ * The data device was flushed as part of metadata commit,
+ * so complete redundant flushes immediately.
+ */
+ if (bio->bi_opf & REQ_PREFLUSH)
+ bio_endio(bio);
+ else
+ generic_make_request(bio);
+ }
}
static void do_worker(struct work_struct *ws)
@@ -3115,6 +3124,7 @@ static void pool_dtr(struct dm_target *ti)
__pool_dec(pt->pool);
dm_put_device(ti, pt->metadata_dev);
dm_put_device(ti, pt->data_dev);
+ bio_uninit(&pt->flush_bio);
kfree(pt);
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3180,6 +3190,29 @@ static void metadata_low_callback(void *context)
dm_table_event(pool->ti->table);
}
+/*
+ * We need to flush the data device **before** committing the metadata.
+ *
+ * This ensures that the data blocks of any newly inserted mappings are
+ * properly written to non-volatile storage and won't be lost in case of a
+ * crash.
+ *
+ * Failure to do so can result in data corruption in the case of internal or
+ * external snapshots and in the case of newly provisioned blocks, when block
+ * zeroing is enabled.
+ */
+static int metadata_pre_commit_callback(void *context)
+{
+ struct pool_c *pt = context;
+ struct bio *flush_bio = &pt->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, pt->data_dev->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static sector_t get_dev_size(struct block_device *bdev)
{
return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
@@ -3348,6 +3381,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
+ bio_init(&pt->flush_bio, NULL, 0);
ti->num_flush_bios = 1;
/*
@@ -3374,6 +3408,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto out_flags_changed;
+ dm_pool_register_pre_commit_callback(pt->pool->pmd,
+ metadata_pre_commit_callback,
+ pt);
+
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
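dm-thin reaches the same guarantee through a generic hook: the pool target registers a pre-commit callback, and __commit_transaction() runs it (flushing the data device) before writing any metadata. A minimal callback-registration sketch in plain C, with names that only loosely mirror the kernel ones:

#include <stdio.h>

typedef int (*pre_commit_fn)(void *context);

/* Metadata object holding an optional pre-commit hook, loosely modelled on
 * struct dm_pool_metadata. */
struct pool_metadata {
	pre_commit_fn pre_commit;
	void *pre_commit_context;
};

static void register_pre_commit_callback(struct pool_metadata *pmd,
					  pre_commit_fn fn, void *context)
{
	pmd->pre_commit = fn;
	pmd->pre_commit_context = context;
}

static int commit_transaction(struct pool_metadata *pmd)
{
	if (pmd->pre_commit) {
		int r = pmd->pre_commit(pmd->pre_commit_context);
		if (r < 0) {
			fprintf(stderr, "pre-commit callback failed\n");
			return r;	/* metadata is not written */
		}
	}
	puts("writing metadata");
	return 0;
}

/* The pool target's callback: flush the data device before metadata hits disk. */
static int flush_data_device(void *context)
{
	printf("flushing data device %s\n", (const char *)context);
	return 0;
}

int main(void)
{
	struct pool_metadata pmd = { 0 };

	register_pre_commit_callback(&pmd, flush_data_device, "sdb");
	return commit_transaction(&pmd);
}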
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 805b33e27496..4e7c9f398bc6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
/* not spare disk, or LEVEL_MULTIPATH */
if (sb->level == LEVEL_MULTIPATH ||
(rdev->desc_nr >= 0 &&
+ rdev->desc_nr < MD_SB_DISKS &&
sb->disks[rdev->desc_nr].state &
((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
spare_disk = false;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 21ea537bd55e..eff04fa23dfa 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned threshold = 2 * merge_threshold(left) + 1;
+ /*
+ * Ensure the number of entries in each child will be greater
+ * than or equal to (max_entries / 3 + 1), so no matter which
+ * child is used for removal, the number will still be not
+ * less than (max_entries / 3).
+ */
+ unsigned int threshold = 2 * (merge_threshold(left) + 1);
if (nr_left + nr_right < threshold) {
/*
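The threshold change above can be checked with a small worked example. Assuming merge_threshold() is max_entries / 3, as the new comment implies, take max_entries = 126, so the threshold value t is 42: the old limit 2*t + 1 = 85 lets 85 entries be rebalanced into 42 + 43, and one further removal drops a child to 41, below max_entries / 3; the new limit 2*(t + 1) = 86 merges that case instead.

#include <stdio.h>

int main(void)
{
	unsigned int max_entries = 126;			/* assumed node capacity */
	unsigned int t = max_entries / 3;		/* merge_threshold() == 42 */
	unsigned int old_threshold = 2 * t + 1;		/* 85 */
	unsigned int new_threshold = 2 * (t + 1);	/* 86 */
	unsigned int total = 85;			/* entries in left + right */

	if (total < old_threshold)
		puts("old rule: merge");
	else
		/* redistribute 42 + 43; removing from the 42-entry child
		 * drops it to 41, below max_entries / 3 */
		printf("old rule: rebalance to %u + %u\n",
		       total / 2, total - total / 2);

	if (total < new_threshold)
		/* 85 < 86: merge instead, so no child can fall below 42 */
		puts("new rule: merge");
	else
		printf("new rule: rebalance to %u + %u\n",
		       total / 2, total - total / 2);
	return 0;
}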
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a409ab6f30bc..201fd8aec59a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2782,7 +2782,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
write_targets++;
}
}
- if (bio->bi_end_io) {
+ if (rdev && bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio_set_dev(bio, rdev->bdev);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index cab5b1352892..d50238d0a85d 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1360,7 +1360,7 @@ int ppl_init_log(struct r5conf *conf)
return -EINVAL;
}
- max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+ max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
BITS_PER_BYTE;
if (conf->raid_disks > max_disks) {
pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f0fc538bfe59..d4d3b67ffbba 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
do_flush = false;
}
- if (!sh->batch_head)
+ if (!sh->batch_head || sh == sh->batch_head)
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 97d660606d98..4dbdf3180d10 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -753,7 +753,7 @@ static const struct preview_update update_attrs[] = {
preview_config_luma_enhancement,
preview_enable_luma_enhancement,
offsetof(struct prev_params, luma),
- FIELD_SIZEOF(struct prev_params, luma),
+ sizeof_field(struct prev_params, luma),
offsetof(struct omap3isp_prev_update_config, luma),
}, /* OMAP3ISP_PREV_INVALAW */ {
NULL,
@@ -762,55 +762,55 @@ static const struct preview_update update_attrs[] = {
preview_config_hmed,
preview_enable_hmed,
offsetof(struct prev_params, hmed),
- FIELD_SIZEOF(struct prev_params, hmed),
+ sizeof_field(struct prev_params, hmed),
offsetof(struct omap3isp_prev_update_config, hmed),
}, /* OMAP3ISP_PREV_CFA */ {
preview_config_cfa,
NULL,
offsetof(struct prev_params, cfa),
- FIELD_SIZEOF(struct prev_params, cfa),
+ sizeof_field(struct prev_params, cfa),
offsetof(struct omap3isp_prev_update_config, cfa),
}, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
preview_config_chroma_suppression,
preview_enable_chroma_suppression,
offsetof(struct prev_params, csup),
- FIELD_SIZEOF(struct prev_params, csup),
+ sizeof_field(struct prev_params, csup),
offsetof(struct omap3isp_prev_update_config, csup),
}, /* OMAP3ISP_PREV_WB */ {
preview_config_whitebalance,
NULL,
offsetof(struct prev_params, wbal),
- FIELD_SIZEOF(struct prev_params, wbal),
+ sizeof_field(struct prev_params, wbal),
offsetof(struct omap3isp_prev_update_config, wbal),
}, /* OMAP3ISP_PREV_BLKADJ */ {
preview_config_blkadj,
NULL,
offsetof(struct prev_params, blkadj),
- FIELD_SIZEOF(struct prev_params, blkadj),
+ sizeof_field(struct prev_params, blkadj),
offsetof(struct omap3isp_prev_update_config, blkadj),
}, /* OMAP3ISP_PREV_RGB2RGB */ {
preview_config_rgb_blending,
NULL,
offsetof(struct prev_params, rgb2rgb),
- FIELD_SIZEOF(struct prev_params, rgb2rgb),
+ sizeof_field(struct prev_params, rgb2rgb),
offsetof(struct omap3isp_prev_update_config, rgb2rgb),
}, /* OMAP3ISP_PREV_COLOR_CONV */ {
preview_config_csc,
NULL,
offsetof(struct prev_params, csc),
- FIELD_SIZEOF(struct prev_params, csc),
+ sizeof_field(struct prev_params, csc),
offsetof(struct omap3isp_prev_update_config, csc),
}, /* OMAP3ISP_PREV_YC_LIMIT */ {
preview_config_yc_range,
NULL,
offsetof(struct prev_params, yclimit),
- FIELD_SIZEOF(struct prev_params, yclimit),
+ sizeof_field(struct prev_params, yclimit),
offsetof(struct omap3isp_prev_update_config, yclimit),
}, /* OMAP3ISP_PREV_DEFECT_COR */ {
preview_config_dcor,
preview_enable_dcor,
offsetof(struct prev_params, dcor),
- FIELD_SIZEOF(struct prev_params, dcor),
+ sizeof_field(struct prev_params, dcor),
offsetof(struct omap3isp_prev_update_config, dcor),
}, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ {
NULL,
@@ -828,13 +828,13 @@ static const struct preview_update update_attrs[] = {
preview_config_noisefilter,
preview_enable_noisefilter,
offsetof(struct prev_params, nf),
- FIELD_SIZEOF(struct prev_params, nf),
+ sizeof_field(struct prev_params, nf),
offsetof(struct omap3isp_prev_update_config, nf),
}, /* OMAP3ISP_PREV_GAMMA */ {
preview_config_gammacorrn,
preview_enable_gammacorrn,
offsetof(struct prev_params, gamma),
- FIELD_SIZEOF(struct prev_params, gamma),
+ sizeof_field(struct prev_params, gamma),
offsetof(struct omap3isp_prev_update_config, gamma),
}, /* OMAP3ISP_PREV_CONTRAST */ {
preview_config_contrast,
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 4e700583659b..003b7422aeef 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2652,7 +2652,7 @@ struct v4l2_ioctl_info {
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
+ sizeof_field(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a880f10e3e70..8083173f1a8f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -129,13 +129,13 @@ struct xgbe_stats {
#define XGMAC_MMC_STAT(_string, _var) \
{ _string, \
- FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ sizeof_field(struct xgbe_mmc_stats, _var), \
offsetof(struct xgbe_prv_data, mmc_stats._var), \
}
#define XGMAC_EXT_STAT(_string, _var) \
{ _string, \
- FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
+ sizeof_field(struct xgbe_ext_stats, _var), \
offsetof(struct xgbe_prv_data, ext_stats._var), \
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 0cc2338d8d2a..dfc77507b159 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -205,11 +205,11 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct,
major_version = (u32)__cvmx_bootmem_desc_get(
oct, oct->bootmem_desc_addr,
offsetof(struct cvmx_bootmem_desc, major_version),
- FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version));
+ sizeof_field(struct cvmx_bootmem_desc, major_version));
minor_version = (u32)__cvmx_bootmem_desc_get(
oct, oct->bootmem_desc_addr,
offsetof(struct cvmx_bootmem_desc, minor_version),
- FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version));
+ sizeof_field(struct cvmx_bootmem_desc, minor_version));
dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
major_version);
@@ -237,13 +237,13 @@ static const struct cvmx_bootmem_named_block_desc
oct, named_addr,
offsetof(struct cvmx_bootmem_named_block_desc,
base_addr),
- FIELD_SIZEOF(
+ sizeof_field(
struct cvmx_bootmem_named_block_desc,
base_addr));
desc->size = __cvmx_bootmem_desc_get(oct, named_addr,
offsetof(struct cvmx_bootmem_named_block_desc,
size),
- FIELD_SIZEOF(
+ sizeof_field(
struct cvmx_bootmem_named_block_desc,
size));
@@ -268,20 +268,20 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
oct, oct->bootmem_desc_addr,
offsetof(struct cvmx_bootmem_desc,
named_block_array_addr),
- FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ sizeof_field(struct cvmx_bootmem_desc,
named_block_array_addr));
u32 num_blocks = (u32)__cvmx_bootmem_desc_get(
oct, oct->bootmem_desc_addr,
offsetof(struct cvmx_bootmem_desc,
nb_num_blocks),
- FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ sizeof_field(struct cvmx_bootmem_desc,
nb_num_blocks));
u32 name_length = (u32)__cvmx_bootmem_desc_get(
oct, oct->bootmem_desc_addr,
offsetof(struct cvmx_bootmem_desc,
named_block_name_len),
- FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ sizeof_field(struct cvmx_bootmem_desc,
named_block_name_len));
u64 named_addr = named_block_array_addr;
@@ -292,7 +292,7 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
offsetof(
struct cvmx_bootmem_named_block_desc,
size),
- FIELD_SIZEOF(
+ sizeof_field(
struct cvmx_bootmem_named_block_desc,
size));
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 5bb5abf99588..022a54a1805b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -23,7 +23,7 @@ struct be_ethtool_stat {
};
enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
-#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
+#define FIELDINFO(_struct, field) sizeof_field(_struct, field), \
offsetof(_struct, field)
#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
FIELDINFO(struct be_tx_stats, field)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d862e9ba27e1..13dbd249f35f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -10240,7 +10240,7 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
return ret;
}
- data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
*len = 0;
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index fbc39a2480d0..180224eab1ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -614,7 +614,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
}
memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
- FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
+ sizeof_field(struct hnae3_knic_private_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 60ec48fe4144..966aea949c0b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -450,7 +450,7 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
- .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \
+ .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}
@@ -477,7 +477,7 @@ static struct hinic_stats hinic_function_stats[] = {
#define HINIC_PORT_STAT(_stat_item) { \
.name = #_stat_item, \
- .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \
+ .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}
@@ -571,7 +571,7 @@ static struct hinic_stats hinic_port_stats[] = {
#define HINIC_TXQ_STAT(_stat_item) { \
.name = "txq%d_"#_stat_item, \
- .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \
+ .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}
@@ -586,7 +586,7 @@ static struct hinic_stats hinic_tx_queue_stats[] = {
#define HINIC_RXQ_STAT(_stat_item) { \
.name = "rxq%d_"#_stat_item, \
- .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \
+ .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c681d2d28107..68edf55ac906 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -18,7 +18,7 @@ struct fm10k_stats {
#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .sizeof_stat = sizeof_field(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index d24d8731bef0..317f3f1458db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -43,7 +43,7 @@ struct i40e_stats {
*/
#define I40E_STAT(_type, _name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .sizeof_stat = sizeof_field(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index be24d42280d8..a3da422ab05b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -659,7 +659,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
#define I40E_HMC_STORE(_struct, _ele) \
offsetof(struct _struct, _ele), \
- FIELD_SIZEOF(struct _struct, _ele)
+ sizeof_field(struct _struct, _ele)
struct i40e_context_ele {
u16 offset;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index dad3eec8ccd8..84c3d8d97ef6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -42,7 +42,7 @@ struct iavf_stats {
*/
#define IAVF_STAT(_type, _name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .sizeof_stat = sizeof_field(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index aec3c6c379df..9ebd93e79aeb 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -15,7 +15,7 @@ struct ice_stats {
#define ICE_STAT(_type, _name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .sizeof_stat = sizeof_field(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
@@ -36,10 +36,10 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
#define ICE_PFC_STATS_LEN ( \
- (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
- FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
- FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
- FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
+ (sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
+ sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
+ sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
+ sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
/ sizeof(u64))
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
ICE_VSI_STATS_LEN + ice_q_stats_len(n))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index ad34f22d44ef..0997d352709b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -302,7 +302,7 @@ struct ice_ctx_ele {
#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
.offset = offsetof(struct _struct, _ele), \
- .size_of = FIELD_SIZEOF(struct _struct, _ele), \
+ .size_of = sizeof_field(struct _struct, _ele), \
.width = _width, \
.lsb = _lsb, \
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 3182b059bf55..4690d6c87f39 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -26,7 +26,7 @@ struct igb_stats {
#define IGB_STAT(_name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+ .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \
.stat_offset = offsetof(struct igb_adapter, _stat) \
}
static const struct igb_stats igb_gstrings_stats[] = {
@@ -76,7 +76,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
#define IGB_NETDEV_STAT(_net_stat) { \
.stat_string = __stringify(_net_stat), \
- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
}
static const struct igb_stats igb_gstrings_net_stats[] = {
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ac98f1d96892..455c1cdceb6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -16,7 +16,7 @@ struct igc_stats {
#define IGC_STAT(_name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+ .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
.stat_offset = offsetof(struct igc_adapter, _stat) \
}
@@ -67,7 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = {
#define IGC_NETDEV_STAT(_net_stat) { \
.stat_string = __stringify(_net_stat), \
- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index c8c93ac436d4..c65eb1afc8fb 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -19,10 +19,10 @@ struct ixgb_stats {
};
#define IXGB_STAT(m) IXGB_STATS, \
- FIELD_SIZEOF(struct ixgb_adapter, m), \
+ sizeof_field(struct ixgb_adapter, m), \
offsetof(struct ixgb_adapter, m)
#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
- FIELD_SIZEOF(struct net_device, m), \
+ sizeof_field(struct net_device, m), \
offsetof(struct net_device, m)
static struct ixgb_stats ixgb_gstrings_stats[] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54459b69c948..f7f309c96fa8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -31,14 +31,14 @@ struct ixgbe_stats {
#define IXGBEVF_STAT(_name, _stat) { \
.stat_string = _name, \
.type = IXGBEVF_STATS, \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+ .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}
#define IXGBEVF_NETDEV_STAT(_net_stat) { \
.stat_string = #_net_stat, \
.type = NETDEV_STATS, \
- .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d5b644131cff..65a093216dac 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1432,11 +1432,11 @@ struct mv643xx_eth_stats {
};
#define SSTAT(m) \
- { #m, FIELD_SIZEOF(struct net_device_stats, m), \
+ { #m, sizeof_field(struct net_device_stats, m), \
offsetof(struct net_device, stats.m), -1 }
#define MIBSTAT(m) \
- { #m, FIELD_SIZEOF(struct mib_counters, m), \
+ { #m, sizeof_field(struct mib_counters, m), \
-1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a1202e53710c..8bf1f08fdee2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -611,7 +611,7 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
}
#define MLX4_LINK_MODES_SZ \
- (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
+ (sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8)
enum ethtool_report {
SUPPORTED = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index c76da309506b..e4ec0e03c289 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -87,10 +87,10 @@ static const struct rhashtable_params rhash_sa = {
* value is not constant during the lifetime
* of the key object.
*/
- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
- FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+ .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+ sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
- FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+ sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d60577484567..9a48c4310887 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -209,7 +209,7 @@ enum fs_i_lock_class {
};
static const struct rhashtable_params rhash_fte = {
- .key_len = FIELD_SIZEOF(struct fs_fte, val),
+ .key_len = sizeof_field(struct fs_fte, val),
.key_offset = offsetof(struct fs_fte, val),
.head_offset = offsetof(struct fs_fte, hash),
.automatic_shrinking = true,
@@ -217,7 +217,7 @@ static const struct rhashtable_params rhash_fte = {
};
static const struct rhashtable_params rhash_fg = {
- .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
+ .key_len = sizeof_field(struct mlx5_flow_group, mask),
.key_offset = offsetof(struct mlx5_flow_group, mask),
.head_offset = offsetof(struct mlx5_flow_group, hash),
.automatic_shrinking = true,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index c80bb83c8ac9..0a721f6e8676 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -2652,17 +2652,17 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
switch (meta->insn.off) {
case offsetof(struct __sk_buff, len):
- if (size != FIELD_SIZEOF(struct __sk_buff, len))
+ if (size != sizeof_field(struct __sk_buff, len))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
break;
case offsetof(struct __sk_buff, data):
- if (size != FIELD_SIZEOF(struct __sk_buff, data))
+ if (size != sizeof_field(struct __sk_buff, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct __sk_buff, data_end):
- if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
+ if (size != sizeof_field(struct __sk_buff, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
@@ -2683,12 +2683,12 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
switch (meta->insn.off) {
case offsetof(struct xdp_md, data):
- if (size != FIELD_SIZEOF(struct xdp_md, data))
+ if (size != sizeof_field(struct xdp_md, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct xdp_md, data_end):
- if (size != FIELD_SIZEOF(struct xdp_md, data_end))
+ if (size != sizeof_field(struct xdp_md, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 8f732771d3fa..11c83a99b014 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -15,7 +15,7 @@
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
.nelem_hint = 4,
- .key_len = FIELD_SIZEOF(struct bpf_map, id),
+ .key_len = sizeof_field(struct bpf_map, id),
.key_offset = offsetof(struct nfp_bpf_neutral_map, map_id),
.head_offset = offsetof(struct nfp_bpf_neutral_map, l),
.automatic_shrinking = true,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 95a0d3910e31..ac02369174a9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -374,7 +374,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
}
use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
- FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
+ sizeof_field(struct nfp_bpf_map, use_map[0]);
nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
if (!nfp_map)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 31d94592a7c0..e0c985fcaec1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -24,7 +24,7 @@ struct nfp_app;
#define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22)
#define NFP_FL_STAT_ID_STAT GENMASK(21, 0)
-#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
+#define NFP_FL_STATS_ELEM_RS sizeof_field(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1a3008e33182..b36aa5bf3c5f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -20,7 +20,7 @@ struct pch_gbe_stats {
#define PCH_GBE_STAT(m) \
{ \
.string = #m, \
- .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \
+ .size = sizeof_field(struct pch_gbe_hw_stats, m), \
.offset = offsetof(struct pch_gbe_hw_stats, m), \
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index c303a92d5b06..e8a1b27db84d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -464,7 +464,7 @@ struct qede_fastpath {
struct qede_tx_queue *txq;
struct qede_tx_queue *xdp_tx;
-#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8)
+#define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8)
char name[VEC_NAME_SIZE];
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index a4cd6f2cfb86..75d83c3cbf27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -20,7 +20,7 @@ struct qlcnic_stats {
int stat_offset;
};
-#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
static const u32 qlcnic_fw_dump_level[] = {
0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index 355cc810e322..cbc6b846ded5 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -37,7 +37,7 @@ struct fw_info {
u8 chksum;
} __packed;
-#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0])
+#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0])
static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
{
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0775b9464b4e..466483c4ac67 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -30,7 +30,7 @@ struct sxgbe_stats {
#define SXGBE_STAT(m) \
{ \
#m, \
- FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
+ sizeof_field(struct sxgbe_extra_stats, m), \
offsetof(struct sxgbe_priv_data, xstats.m) \
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1a768837ca72..b29603ec744c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -34,7 +34,7 @@ struct stmmac_stats {
};
#define STMMAC_STAT(m) \
- { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ { #m, sizeof_field(struct stmmac_extra_stats, m), \
offsetof(struct stmmac_priv, xstats.m)}
static const struct stmmac_stats stmmac_gstrings_stats[] = {
@@ -163,7 +163,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m) \
- { #m, FIELD_SIZEOF(struct stmmac_counters, m), \
+ { #m, sizeof_field(struct stmmac_counters, m), \
offsetof(struct stmmac_priv, mmc.m)}
static const struct stmmac_stats stmmac_mmc[] = {
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 31248a6cc642..fa54efe3be63 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -73,13 +73,13 @@ enum {
};
#define CPSW_STAT(m) CPSW_STATS, \
- FIELD_SIZEOF(struct cpsw_hw_stats, m), \
+ sizeof_field(struct cpsw_hw_stats, m), \
offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ sizeof_field(struct cpdma_chan_stats, m), \
offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ sizeof_field(struct cpdma_chan_stats, m), \
offsetof(struct cpdma_chan_stats, m)
static const struct cpsw_stats cpsw_gstrings_stats[] = {
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 86a3f42a3dcc..d6a192c1f337 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -783,28 +783,28 @@ struct netcp_ethtool_stat {
#define GBE_STATSA_INFO(field) \
{ \
"GBE_A:"#field, GBE_STATSA_MODULE, \
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ sizeof_field(struct gbe_hw_stats, field), \
offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSB_INFO(field) \
{ \
"GBE_B:"#field, GBE_STATSB_MODULE, \
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ sizeof_field(struct gbe_hw_stats, field), \
offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSC_INFO(field) \
{ \
"GBE_C:"#field, GBE_STATSC_MODULE, \
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ sizeof_field(struct gbe_hw_stats, field), \
offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSD_INFO(field) \
{ \
"GBE_D:"#field, GBE_STATSD_MODULE, \
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ sizeof_field(struct gbe_hw_stats, field), \
offsetof(struct gbe_hw_stats, field) \
}
@@ -957,7 +957,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
#define GBENU_STATS_HOST(field) \
{ \
"GBE_HOST:"#field, GBENU_STATS0_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
@@ -967,56 +967,56 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
#define GBENU_STATS_P1(field) \
{ \
"GBE_P1:"#field, GBENU_STATS1_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P2(field) \
{ \
"GBE_P2:"#field, GBENU_STATS2_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P3(field) \
{ \
"GBE_P3:"#field, GBENU_STATS3_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P4(field) \
{ \
"GBE_P4:"#field, GBENU_STATS4_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P5(field) \
{ \
"GBE_P5:"#field, GBENU_STATS5_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P6(field) \
{ \
"GBE_P6:"#field, GBENU_STATS6_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P7(field) \
{ \
"GBE_P7:"#field, GBENU_STATS7_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P8(field) \
{ \
"GBE_P8:"#field, GBENU_STATS8_MODULE, \
- FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ sizeof_field(struct gbenu_hw_stats, field), \
offsetof(struct gbenu_hw_stats, field) \
}
@@ -1607,21 +1607,21 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
#define XGBE_STATS0_INFO(field) \
{ \
"GBE_0:"#field, XGBE_STATS0_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ sizeof_field(struct xgbe_hw_stats, field), \
offsetof(struct xgbe_hw_stats, field) \
}
#define XGBE_STATS1_INFO(field) \
{ \
"GBE_1:"#field, XGBE_STATS1_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ sizeof_field(struct xgbe_hw_stats, field), \
offsetof(struct xgbe_hw_stats, field) \
}
#define XGBE_STATS2_INFO(field) \
{ \
"GBE_2:"#field, XGBE_STATS2_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ sizeof_field(struct xgbe_hw_stats, field), \
offsetof(struct xgbe_hw_stats, field) \
}
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 09f3604cfbf8..746736c83873 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -21,7 +21,7 @@ struct fjes_stats {
#define FJES_STAT(name, stat) { \
.stat_string = name, \
- .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \
+ .sizeof_stat = sizeof_field(struct fjes_adapter, stat), \
.stat_offset = offsetof(struct fjes_adapter, stat) \
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5c6b7fc04ea6..75757e9954ba 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1156,7 +1156,7 @@ static void geneve_setup(struct net_device *dev)
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
- [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
[IFLA_GENEVE_TTL] = { .type = NLA_U8 },
[IFLA_GENEVE_TOS] = { .type = NLA_U8 },
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index eff8fef4f775..02e66473f2ed 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -571,7 +571,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Use the skb control buffer for building up the packet */
BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
packet = (struct hv_netvsc_packet *)skb->cb;
packet->q_idx = skb_get_queue_mapping(skb);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 34c1eaba536c..389d19dd7909 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -865,7 +865,7 @@ static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
u16 len;
bool need_tail;
- BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ BUILD_BUG_ON(sizeof_field(struct usbnet, data)
< sizeof(struct cdc_state));
dev_dbg(&dev->udev->dev, "%s", __func__);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 30e511c2c8d0..9ce6d30576dd 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -2184,7 +2184,7 @@ static int __init usbnet_init(void)
{
/* Compiler should optimize this out. */
BUILD_BUG_ON(
- FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
+ sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
eth_random_addr(node_id);
return 0;
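
Editor's note: several of the BUILD_BUG_ON() sites converted in this merge (hyperv, sierra_net, usbnet, and later cxgbi/cxgbit) guard the common trick of stashing driver-private state in the fixed-size skb control buffer. A hedged sketch of the pattern, with a hypothetical private struct:

struct foo_skb_cb {                     /* hypothetical per-skb driver state */
        unsigned long cookie;
        void *priv;
};

/* fail the build if the private state ever outgrows skb->cb[] (48 bytes) */
BUILD_BUG_ON(sizeof(struct foo_skb_cb) > sizeof_field(struct sk_buff, cb));
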
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4c34375c2e22..3ec6b506033d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3069,10 +3069,10 @@ static void vxlan_raw_setup(struct net_device *dev)
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
- [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VXLAN_GROUP] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
- [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VXLAN_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
[IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c
index fe14814af300..c604613ab506 100644
--- a/drivers/net/wireless/marvell/libertas/debugfs.c
+++ b/drivers/net/wireless/marvell/libertas/debugfs.c
@@ -774,7 +774,7 @@ void lbs_debugfs_remove_one(struct lbs_private *priv)
#ifdef PROC_DEBUG
-#define item_size(n) (FIELD_SIZEOF(struct lbs_private, n))
+#define item_size(n) (sizeof_field(struct lbs_private, n))
#define item_addr(n) (offsetof(struct lbs_private, n))
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index c386992abcdb..7cafcecd7b85 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -36,11 +36,11 @@ struct mwifiex_cb {
};
/* size/addr for mwifiex_debug_info */
-#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
+#define item_size(n) (sizeof_field(struct mwifiex_debug_info, n))
#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
/* size/addr for struct mwifiex_adapter */
-#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
+#define adapter_item_size(n) (sizeof_field(struct mwifiex_adapter, n))
#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
struct mwifiex_debug_data {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dfe37a525f3a..667f18f465be 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1735,6 +1735,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
if (ret)
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", ret);
+ if (ret > 0)
+ ret = 0;
}
return ret;
}
@@ -2852,6 +2854,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* admin connect
*/
if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+ dev_err(ctrl->device,
+ "Mismatching cntlid: Connect %u vs Identify "
+ "%u, rejecting\n",
+ ctrl->cntlid, le16_to_cpu(id->cntlid));
ret = -EINVAL;
goto out_free;
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 679a721ae229..5a70ac395d53 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
struct nvme_fcp_op_w_sgl {
struct nvme_fc_fcp_op op;
- struct scatterlist sgl[SG_CHUNK_SIZE];
+ struct scatterlist sgl[NVME_INLINE_SG_CNT];
uint8_t priv[0];
};
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary) {
+ !template->max_dif_sgl_segments || !template->dma_boundary ||
+ !template->module) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
+ struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
+ module_put(lport->ops->module);
}
static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
freq->sg_table.sgl = freq->first_sgl;
ret = sg_alloc_table_chained(&freq->sg_table,
blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
- SG_CHUNK_SIZE);
+ NVME_INLINE_SG_CNT);
if (ret)
return -ENOMEM;
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, rq_dma_dir(rq));
if (unlikely(freq->sg_cnt <= 0)) {
- sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
freq->sg_cnt = 0;
return -EFAULT;
}
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq));
- sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
freq->sg_cnt = 0;
}
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
- nvme_stop_keep_alive(&ctrl->ctrl);
+ /*
+ * if state is connecting - the error occurred as part of a
+ * reconnect attempt. The create_association error paths will
+ * clean up any outstanding io.
+ *
+ * if it's a different state - ensure all pending io is
+ * terminated. Given this can delay while waiting for the
+ * aborted io to return, we recheck adapter state below
+ * before changing state.
+ */
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+ nvme_stop_keep_alive(&ctrl->ctrl);
- /* will block will waiting for io to terminate */
- nvme_fc_delete_association(ctrl);
+ /* will block will waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
+ }
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
+ if (!try_module_get(lport->ops->module)) {
+ ret = -EUNATCH;
+ goto out_free_ctrl;
+ }
+
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_free_ctrl;
+ goto out_mod_put;
}
ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ out_free_queues:
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+ module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
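
Editor's note: the fc.c changes above pair with the later .module = THIS_MODULE additions in fcloop, lpfc and qla2xxx; the transport now pins the LLDD module for the lifetime of each controller so the LLDD cannot be unloaded under an active association. A condensed recap of the pattern, paraphrased from the hunks (template name hypothetical, not a literal copy):

/* LLDD side: advertise the owning module in its port template */
static struct nvme_fc_port_template foo_nvme_fc_template = {
        .module           = THIS_MODULE,
        .localport_delete = foo_localport_delete,
        /* remaining ops elided */
};

/* core side: pin the LLDD when a controller is created ... */
if (!try_module_get(lport->ops->module)) {
        ret = -EUNATCH;
        goto out_free_ctrl;
}

/* ... and drop the reference when the last controller reference goes away */
module_put(lport->ops->module);         /* in nvme_fc_ctrl_free() */
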
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3b9cbe0668fa..1024fec7914c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
#define NVME_DEFAULT_KATO 5
#define NVME_KATO_GRACE 10
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT 0
+#else
+#define NVME_INLINE_SG_CNT 2
+#endif
+
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
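
Editor's note: NVME_INLINE_SG_CNT replaces SG_CHUNK_SIZE (128) as the number of scatterlist entries embedded inline in each fc/rdma/loop request; requests that need more entries fall back to the chained allocator. A condensed view of how the two halves fit together, taken from the surrounding hunks:

/* per-request command size reserves only a small inline SGL ... */
set->cmd_size = sizeof(struct nvme_rdma_request) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);

/* ... which is handed to the chained allocator as the first chunk;
 * anything larger is allocated and chained on demand
 */
req->sg_table.sgl = req->first_sgl;
ret = sg_alloc_table_chained(&req->sg_table,
                             blk_rq_nr_phys_segments(rq),
                             req->sg_table.sgl, NVME_INLINE_SG_CNT);
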
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dcaad5831cee..365a2ddbeaa7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
MODULE_PARM_DESC(write_queues,
"Number of queues to use for writes. If not set, reads and writes "
"will share a queue set.");
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
u16 sq_tail;
u16 last_sq_tail;
u16 cq_head;
- u16 last_cq_head;
u16 qid;
u8 cq_phase;
u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
* the irq handler, even if that was on another CPU.
*/
rmb();
- if (nvmeq->cq_head != nvmeq->last_cq_head)
- ret = IRQ_HANDLED;
nvme_process_cq(nvmeq, &start, &end, -1);
- nvmeq->last_cq_head = nvmeq->cq_head;
wmb();
if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
return result;
- else if (result)
+ if (result)
goto release_cq;
nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, this_p_queues;
- unsigned int nr_cpus = num_possible_cpus();
/*
* Poll queues don't need interrupts, but we need at least one IO
@@ -2069,10 +2064,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- if (nr_cpus < nr_io_queues - this_p_queues)
- irq_queues = nr_cpus + 1;
- else
- irq_queues = nr_io_queues - this_p_queues + 1;
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+ write_queues = min(write_queues, num_possible_cpus());
+ poll_queues = min(poll_queues, num_possible_cpus());
return pci_register_driver(&nvme_driver);
}
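
Editor's note: the pci.c hunks above make write_queues/poll_queues unsigned module parameters and clamp them once at init rather than re-validating inside nvme_setup_irqs(); a simplified view of the resulting flow, condensed from the hunks:

static unsigned int write_queues;
module_param(write_queues, uint, 0644);

static int __init nvme_init(void)
{
        /* negative values can no longer be passed in; just bound by CPU count */
        write_queues = min(write_queues, num_possible_cpus());
        poll_queues  = min(poll_queues,  num_possible_cpus());
        return pci_register_driver(&nvme_driver);
}
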
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dce59459ed41..2a47c6c5007e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = nctrl->numa_node;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
set->driver_data = ctrl;
set->nr_hw_queues = 1;
set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->sg_table.sgl = req->first_sgl;
ret = sg_alloc_table_chained(&req->sg_table,
blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
- SG_CHUNK_SIZE);
+ NVME_INLINE_SG_CNT);
if (ret)
return -ENOMEM;
@@ -1314,7 +1314,7 @@ out:
out_unmap_sg:
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
out_free_table:
- sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
return ret;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b50b53db3746..1c50af6219f3 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
+ .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a758bb3d5dd4..4df4ebde208a 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
nvme_complete_rq(req);
}
@@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+ iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
nvme_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
@@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->tag_set.driver_data = ctrl;
ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index d93891a05f60..3371e4a06248 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -518,10 +518,11 @@ static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
+ device_links_supplier_sync_state_pause();
+
if (!of_have_populated_dt())
return -ENODEV;
- device_links_supplier_sync_state_pause();
/*
* Handle certain compatibles explicitly, since we don't want to create
* platform_devices for every node in /reserved-memory with a
@@ -545,8 +546,7 @@ arch_initcall_sync(of_platform_default_populate_init);
static int __init of_platform_sync_state_init(void)
{
- if (of_have_populated_dt())
- device_links_supplier_sync_state_resume();
+ device_links_supplier_sync_state_resume();
return 0;
}
late_initcall_sync(of_platform_sync_state_init);
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index d9b63bfa5dd7..94af6f5828a3 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -834,10 +834,12 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
if (!entry)
return -ENODEV;
+ /* store the register number offset to program RC io outbound ATU */
+ offset = size >> 20;
+
size = resource_size(entry->res);
pci_addr = entry->res->start - entry->offset;
- offset = size >> 20;
for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b9a2349e4b90..33a62a6692c0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4779,7 +4779,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "qdioest");
- qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
+ qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
if (!qib_param_field) {
rc = -ENOMEM;
goto out_free_nothing;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 88f4dc140751..c1ecce95094d 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -421,7 +421,7 @@ struct qeth_ipacmd_setassparms {
} data;
} __attribute__ ((packed));
-#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\
+#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\
data.field)
/* SETRTG IPA Command: ****************************************************/
@@ -535,7 +535,7 @@ struct qeth_ipacmd_setadpparms {
} data;
} __attribute__ ((packed));
-#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\
+#define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\
data.field)
/* CREATE_ADDR IPA Command: ***********************************************/
@@ -648,7 +648,7 @@ struct qeth_ipacmd_vnicc {
} data;
};
-#define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\
+#define VNICC_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_vnicc,\
data.field)
/* SETBRIDGEPORT IPA Command: *********************************************/
@@ -729,7 +729,7 @@ struct qeth_ipacmd_setbridgeport {
} data;
} __packed;
-#define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\
+#define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\
data.field)
/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
@@ -790,7 +790,7 @@ struct qeth_ipa_cmd {
} data;
} __attribute__ ((packed));
-#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field)
+#define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field)
/*
* special command for ARP processing.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e36608ce937a..33dbc051bff9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -535,7 +535,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
char *sp = get_name_reply->data;
- int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+ int data_size = sizeof_field(struct aac_get_name_resp, data);
sp[data_size - 1] = '\0';
while (*sp == ' ')
@@ -574,7 +574,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+ data_size = sizeof_field(struct aac_get_name_resp, data);
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 063dccc18f70..5f9f0b18ddf3 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1300,7 +1300,7 @@ struct be_cmd_get_port_name {
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
- (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
+ (sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
/**
* Different types of iSCSI completions to host driver for both initiator
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 0d044c165960..c4e4b0136f86 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2746,7 +2746,7 @@ static int __init libcxgbi_init_module(void)
{
pr_info("%s", version);
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
sizeof(struct cxgbi_skb_cb));
return 0;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ebd47c0cf9e9..70b99c0e2e67 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
- spin_lock(&session->frwd_lock);
+ spin_lock_bh(&session->frwd_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
@@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
done:
if (task)
task->last_timeout = jiffies;
- spin_unlock(&session->frwd_lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "shutdown or nh");
return rc;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index f47b4b281b14..d7302c2052f9 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
- } else {
+ } else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
+ } else {
+ /* If the oob mode is OOB_NOT_CONNECTED, the port is
+ * disconnected due to race with PHY down. We cannot
+ * continue to discover this port
+ */
+ sas_put_device(dev);
+ pr_warn("Port %016llx is disconnected when discovering\n",
+ SAS_ADDR(port->attached_sas_addr));
+ return -ENODEV;
}
sas_init_dev(dev);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d4e1b120cc9e..0ea03ae93d91 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index db4a04a207ec..f6c8963c915d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1985,6 +1985,8 @@ out_unlock:
/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
+ .module = THIS_MODULE,
+
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ae97e2f310a3..d7e7043f9eab 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -178,6 +178,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
faddr = ha->flt_region_nvram;
if (IS_QLA28XX(ha)) {
+ qla28xx_get_aux_images(vha, &active_regions);
if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_nvram_sec;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 99f0a1a08143..cbaf178fc979 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2399,7 +2399,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
struct qla_active_regions regions = { };
struct active_regions active_regions = { };
- qla28xx_get_aux_images(vha, &active_regions);
+ qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
if (IS_QLA28XX(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 460f443f6471..2edd9f7b3074 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2401,6 +2401,7 @@ typedef struct fc_port {
unsigned int id_changed:1;
unsigned int scan_needed:1;
unsigned int n2n_flag:1;
+ unsigned int explicit_logout:1;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 59f6903e5abe..9dc09c117416 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1523,6 +1523,10 @@ struct qla_flt_header {
#define FLT_REG_NVRAM_SEC_28XX_1 0x10F
#define FLT_REG_NVRAM_SEC_28XX_2 0x111
#define FLT_REG_NVRAM_SEC_28XX_3 0x113
+#define FLT_REG_MPI_PRI_28XX 0xD3
+#define FLT_REG_MPI_SEC_28XX 0xF0
+#define FLT_REG_PEP_PRI_28XX 0xD1
+#define FLT_REG_PEP_SEC_28XX 0xF1
struct qla_flt_region {
uint16_t code;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6c28f38f8021..aa5204163bec 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -533,6 +533,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
e->u.fcport.fcport = fcport;
fcport->flags |= FCF_ASYNC_ACTIVE;
+ fcport->disc_state = DSC_LOGIN_PEND;
return qla2x00_post_work(vha, e);
}
@@ -1526,8 +1527,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
}
}
- /* for pure Target Mode. Login will not be initiated */
- if (vha->host->active_mode == MODE_TARGET)
+ /* Target won't initiate port login if fabric is present */
+ if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
return 0;
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1719,6 +1720,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
struct event_arg *ea)
{
+ /* for pure Target Mode, PRLI will not be initiated */
+ if (vha->host->active_mode == MODE_TARGET)
+ return;
+
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post PRLI\n",
__func__, __LINE__, ea->fcport->port_name);
@@ -4852,6 +4857,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->free_work, qlt_free_session_done);
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -4930,14 +4936,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);
- } else if (ha->current_topology == ISP_CFG_N) {
- clear_bit(RSCN_UPDATE, &flags);
- if (qla_tgt_mode_enabled(vha)) {
- /* allow the other side to start the login */
- clear_bit(LOCAL_LOOP_UPDATE, &flags);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- }
- } else if (ha->current_topology == ISP_CFG_NL) {
+ } else if (ha->current_topology == ISP_CFG_NL ||
+ ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (!vha->flags.online ||
@@ -5054,7 +5054,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
memcpy(&ha->plogi_els_payld.data,
(void *)ha->init_cb,
sizeof(ha->plogi_els_payld.data));
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
} else {
ql_dbg(ql_dbg_init, vha, 0x00d1,
"PLOGI ELS param read fail.\n");
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b25f87ff8cde..8b050f0b4333 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
+ u16 control_flags = LCF_COMMAND_LOGO;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags =
- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->keep_nport_handle)
- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+
+ if (sp->fcport->explicit_logout) {
+ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
+ } else {
+ control_flags |= LCF_IMPL_LOGO;
+
+ if (!sp->fcport->keep_nport_handle)
+ control_flags |= LCF_FREE_NPORT;
+ }
+
+ logio->control_flags = cpu_to_le16(control_flags);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2617,6 +2625,10 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
sizeof(struct els_logo_payload));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
+ elsio->u.els_logo.els_logo_pyld,
+ sizeof(*elsio->u.els_logo.els_logo_pyld));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -2676,7 +2688,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
- (uint8_t *)els_iocb, 0x70);
+ (uint8_t *)els_iocb,
+ sizeof(*els_iocb));
} else {
els_iocb->control_flags = 1 << 13;
els_iocb->tx_byte_count =
@@ -2688,6 +2701,11 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_byte_count = 0;
els_iocb->rx_address = 0;
els_iocb->rx_len = 0;
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
+ "LOGO ELS IOCB:");
+ ql_dump_buffer(ql_log_info, vha, 0x010b,
+ els_iocb,
+ sizeof(*els_iocb));
}
sp->vha->qla_stats.control_requests++;
@@ -2934,7 +2952,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
- (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+ sizeof(*elsio->u.els_plogi.els_plogi_pyld));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2601d7673c37..7b8a6bfcf08d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1061,8 +1061,6 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
-
- qlt_async_event(mb[0], vha, mb);
break;
}
@@ -1079,8 +1077,6 @@ global_port_update:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
-
- qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0cf94f05f008..b7c1108c48e2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3921,6 +3921,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
vha->d_id.b24 = 0;
vha->d_id.b.al_pa = 1;
ha->flags.n2n_bigger = 1;
+ ha->flags.n2n_ae = 0;
id.b.al_pa = 2;
ql_dbg(ql_dbg_async, vha, 0x5075,
@@ -3931,6 +3932,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: Remote login - Waiting for WWPN %8phC.\n",
rptid_entry->u.f1.port_name);
ha->flags.n2n_bigger = 0;
+ ha->flags.n2n_ae = 1;
}
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,
@@ -3942,7 +3944,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
/* if our portname is higher then initiate N2N login */
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- ha->flags.n2n_ae = 1;
return;
break;
case TOPO_FL:
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 941aa53363f5..bfcd02fdf2b8 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
+ .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f2d5115b2d8d..bbe90354f49b 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -847,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_sec = start;
break;
case FLT_REG_FW_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_fw_sec = start;
break;
case FLT_REG_BOOTLOAD_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_boot_sec = start;
break;
case FLT_REG_AUX_IMG_PRI_28XX:
@@ -2725,8 +2725,11 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Region %x is secure\n", region.code);
- if (region.code == FLT_REG_FW ||
- region.code == FLT_REG_FW_SEC_27XX) {
+ switch (region.code) {
+ case FLT_REG_FW:
+ case FLT_REG_FW_SEC_27XX:
+ case FLT_REG_MPI_PRI_28XX:
+ case FLT_REG_MPI_SEC_28XX:
fw_array = dwptr;
/* 1st fw array */
@@ -2757,9 +2760,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
buf_size_without_sfub += risc_size;
fw_array += risc_size;
}
- } else {
- ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
- "Secure region %x not supported\n",
+ break;
+
+ case FLT_REG_PEP_PRI_28XX:
+ case FLT_REG_PEP_SEC_28XX:
+ fw_array = dwptr;
+
+ /* 1st fw array */
+ risc_size = be32_to_cpu(fw_array[3]);
+ risc_attr = be32_to_cpu(fw_array[9]);
+
+ buf_size_without_sfub = risc_size;
+ fw_array += risc_size;
+ break;
+
+ default:
+ ql_log(ql_log_warn + ql_dbg_verbose, vha,
+ 0xffff, "Secure region %x not supported\n",
region.code);
rval = QLA_COMMAND_ERROR;
goto done;
@@ -2880,7 +2897,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
"Sending Secure Flash MB Cmd\n");
rval = qla28xx_secure_flash_update(vha, 0, region.code,
buf_size_without_sfub, sfub_dma,
- sizeof(struct secure_flash_update_block));
+ sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xffff,
"Secure Flash MB Cmd failed %x.", rval);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 51b275a575a5..68c14143e50e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work)
}
}
+ sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
@@ -1160,7 +1161,6 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
- INIT_WORK(&sess->free_work, qlt_free_session_done);
queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
@@ -1265,7 +1265,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
- INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
@@ -4804,6 +4803,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
switch (sess->disc_state) {
case DSC_DELETED:
+ case DSC_LOGIN_PEND:
qlt_plogi_ack_unref(vha, pla);
break;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 042a24314edc..abe7f79bb789 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
*/
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
+ if (!mcmd)
+ return;
INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
@@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
target_sess_cmd_list_set_waiting(se_sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sess->explicit_logout = 1;
tcm_qla2xxx_put_sess(sess);
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8c674eca09f1..2323432a0edb 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_SUCCESS;
mem_alloc_error_exit:
- qla4xxx_mem_free(ha);
return QLA_ERROR;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 417b868d8735..ed8d9709b9b9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -24,6 +24,8 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
+#define ISCSI_SEND_MAX_ALLOWED 10
+
#define CREATE_TRACE_POINTS
#include <trace/events/iscsi.h>
@@ -3682,6 +3684,7 @@ iscsi_if_rx(struct sk_buff *skb)
struct nlmsghdr *nlh;
struct iscsi_uevent *ev;
uint32_t group;
+ int retries = ISCSI_SEND_MAX_ALLOWED;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
@@ -3712,6 +3715,10 @@ iscsi_if_rx(struct sk_buff *skb)
break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
+ if (err == -EAGAIN && --retries < 0) {
+ printk(KERN_WARNING "Send reply failed, error %d\n", err);
+ break;
+ }
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7b7ef3acb504..412ac56ecd60 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -8689,11 +8689,11 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
data.delete_operational_queue.queue_id) != 12);
BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.create_operational_iq) != 64 - 11);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.create_operational_oq) != 64 - 11);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.delete_operational_queue) != 64 - 11);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index b2af04c57a39..6feeb0faf123 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -99,6 +99,12 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
*/
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+ /*
+ * Disabling Autohibern8 feature in cadence UFS
+ * to mask unexpected interrupt trigger.
+ */
+ hba->ahit = 0;
+
return 0;
}
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index baeecee35d1e..53dd87628cbe 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -203,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
bsg_dev->parent = get_device(parent);
bsg_dev->release = ufs_bsg_node_release;
- dev_set_name(bsg_dev, "ufs-bsg");
+ dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
ret = device_add(bsg_dev);
if (ret)
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 2aac1e000977..51c665a924b7 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -805,8 +805,8 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
s32 create_file(struct inode *inode, struct chain_t *p_dir,
struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid);
void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry);
-s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
- struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid);
s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
struct chain_t *p_newdir, struct uni_name_t *p_uniname,
struct file_id_t *fid);
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index d2d3447083c7..794000e7bc6f 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -192,8 +192,6 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-
#ifdef CONFIG_EXFAT_DISCARD
if (opts->discard) {
ret = sb_issue_discard(sb, START_SECTOR(clu),
@@ -202,9 +200,13 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
if (ret == -EOPNOTSUPP) {
pr_warn("discard not supported by device, disabling");
opts->discard = 0;
+ } else {
+ return ret;
}
}
#endif /* CONFIG_EXFAT_DISCARD */
+
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
}
static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
@@ -2322,8 +2324,8 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
}
-s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
- struct uni_name_t *p_uniname, struct file_id_t *fid)
+s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid)
{
s32 ret, newentry = -1, num_old_entries, num_new_entries;
sector_t sector_old, sector_new;
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 6e481908c59f..9f91853b189b 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -1262,8 +1262,8 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
fs_set_vol_flags(sb, VOL_DIRTY);
if (olddir.dir == newdir.dir)
- ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name,
- fid);
+ ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
+ &uni_name, fid);
else
ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
&uni_name, fid);
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index e763205e9e4f..f61e373c75e9 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -63,11 +63,17 @@ static int init_display(struct fbtft_par *par)
{
int ret;
- /* Set CS active high */
- par->spi->mode |= SPI_CS_HIGH;
+ /*
+ * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
+ * work with GPIO based chip selects that are logically active high
+ * but inverted inside the GPIO library, so enforce inverted
+ * semantics.
+ */
+ par->spi->mode ^= SPI_CS_HIGH;
ret = spi_setup(par->spi);
if (ret) {
- dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+ dev_err(par->info->device,
+ "Could not set inverse CS polarity\n");
return ret;
}
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 27cc8eabcbe9..76b25df376b8 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -150,10 +150,17 @@ static int init_display(struct fbtft_par *par)
/* enable SPI interface by having CS and MOSI low during reset */
save_mode = par->spi->mode;
- par->spi->mode |= SPI_CS_HIGH;
- ret = spi_setup(par->spi); /* set CS inactive low */
+ /*
+ * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
+ * work with GPIO based chip selects that are logically active high
+ * but inverted inside the GPIO library, so enforce inverted
+ * semantics.
+ */
+ par->spi->mode ^= SPI_CS_HIGH;
+ ret = spi_setup(par->spi);
if (ret) {
- dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+ dev_err(par->info->device,
+ "Could not set inverse CS polarity\n");
return ret;
}
write_reg(par, 0x00); /* make sure mode is set */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index ffb84987dd86..d3e098b41b1a 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -913,7 +913,7 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
if (count == 0)
return -EINVAL;
- values = kmalloc_array(count, sizeof(*values), GFP_KERNEL);
+ values = kmalloc_array(count + 1, sizeof(*values), GFP_KERNEL);
if (!values)
return -ENOMEM;
@@ -926,9 +926,9 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
index = -1;
- while (index < count) {
- val = values[++index];
+ val = values[++index];
+ while (index < count) {
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
i = 0;
diff --git a/drivers/staging/hp/Kconfig b/drivers/staging/hp/Kconfig
index fb395cfe6b92..f20ab21a6b2a 100644
--- a/drivers/staging/hp/Kconfig
+++ b/drivers/staging/hp/Kconfig
@@ -6,6 +6,7 @@
config NET_VENDOR_HP
bool "HP devices"
default y
+ depends on ETHERNET
depends on ISA || EISA || PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
index 1b9b43659bdf..a20c0bfa68f3 100644
--- a/drivers/staging/isdn/gigaset/usb-gigaset.c
+++ b/drivers/staging/isdn/gigaset/usb-gigaset.c
@@ -571,8 +571,7 @@ static int gigaset_initcshw(struct cardstate *cs)
{
struct usb_cardstate *ucs;
- cs->hw.usb = ucs =
- kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
+ cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
if (!ucs) {
pr_err("out of memory\n");
return -ENOMEM;
@@ -584,9 +583,6 @@ static int gigaset_initcshw(struct cardstate *cs)
ucs->bchars[3] = 0;
ucs->bchars[4] = 0x11;
ucs->bchars[5] = 0x13;
- ucs->bulk_out_buffer = NULL;
- ucs->bulk_out_urb = NULL;
- ucs->read_urb = NULL;
tasklet_init(&cs->write_tasklet,
gigaset_modem_fill, (unsigned long) cs);
@@ -685,6 +681,11 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 2) {
+ dev_err(&interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
/* allocate memory for our device state and initialize it */
@@ -704,6 +705,12 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[0].desc;
+ if (!usb_endpoint_is_bulk_out(endpoint)) {
+ dev_err(&interface->dev, "missing bulk-out endpoint\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
ucs->bulk_out_size = buffer_size;
ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
@@ -723,6 +730,12 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[1].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&interface->dev, "missing int-in endpoint\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
ucs->busy = 0;
ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 5319909eb2f6..e7f4ddcc1361 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -3,6 +3,7 @@ config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
depends on NETDEVICES
+ depends on BROKEN
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index a6886cc5654c..56d116d79e56 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -41,7 +41,7 @@ struct ql_stats {
int stat_offset;
};
-#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m)
#define QL_OFF(m) offsetof(struct ql_adapter, m)
static const struct ql_stats ql_gstrings_stats[] = {
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 4fac9dca798e..a7cac0719b8b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -70,7 +70,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
phost_conf = pusbd->actconfig;
pconf_desc = &phost_conf->desc;
- phost_iface = &usb_intf->altsetting[0];
+ phost_iface = usb_intf->cur_altsetting;
piface_desc = &phost_iface->desc;
pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index ba1288297ee4..a87562f632a7 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -247,7 +247,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
pdvobjpriv->padapter = padapter;
padapter->eeprom_address_size = 6;
- phost_iface = &pintf->altsetting[0];
+ phost_iface = pintf->cur_altsetting;
piface_desc = &phost_iface->desc;
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
if (pusbd->speed == USB_SPEED_HIGH) {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 02148a24818a..4458c1e60fa3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3309,7 +3309,7 @@ static int __init vchiq_driver_init(void)
return 0;
region_unregister:
- platform_driver_unregister(&vchiq_driver);
+ unregister_chrdev_region(vchiq_devid, 1);
class_destroy:
class_destroy(vchiq_class);
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index b722e9773232..df2640a79f02 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -679,7 +679,7 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct ieee80211_sta *sta = control ? control->sta : NULL;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info,
+ size_t driver_data_room = sizeof_field(struct ieee80211_tx_info,
rate_driver_data);
compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index ac136663fa8e..082c16a31616 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -4,6 +4,7 @@ config PRISM2_USB
depends on WLAN && USB && CFG80211
select WIRELESS_EXT
select WEXT_PRIV
+ select CRC32
help
This is the wlan-ng prism 2.5/3 USB driver for a wide range of
old USB wireless devices.
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index e877b917c15f..30ea37e1a3f5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -708,7 +708,7 @@ static int __init cxgbit_init(void)
pr_info("%s dcb enabled.\n", DRV_NAME);
register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
sizeof(union cxgbit_skb_cb));
return 0;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 59b79fc48266..79b27865c6f4 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -108,7 +108,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
bool "power_allocator"
- select THERMAL_GOV_POWER_ALLOCATOR
+ depends on THERMAL_GOV_POWER_ALLOCATOR
help
Select this if you want to control temperature based on
system and device power allocation. This governor can only
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 8b0ea8c70d73..635cf0466b59 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2124,10 +2124,11 @@ resubmit:
/*
* Start the modem : init the data and start kernel thread
*/
-static int uea_boot(struct uea_softc *sc)
+static int uea_boot(struct uea_softc *sc, struct usb_interface *intf)
{
- int ret, size;
struct intr_pkt *intr;
+ int ret = -ENOMEM;
+ int size;
uea_enters(INS_TO_USBDEV(sc));
@@ -2152,6 +2153,11 @@ static int uea_boot(struct uea_softc *sc)
if (UEA_CHIP_VERSION(sc) == ADI930)
load_XILINX_firmware(sc);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ ret = -ENODEV;
+ goto err0;
+ }
+
intr = kmalloc(size, GFP_KERNEL);
if (!intr)
goto err0;
@@ -2163,8 +2169,7 @@ static int uea_boot(struct uea_softc *sc)
usb_fill_int_urb(sc->urb_int, sc->usb_dev,
usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
intr, size, uea_intr, sc,
- sc->usb_dev->actconfig->interface[0]->altsetting[0].
- endpoint[0].desc.bInterval);
+ intf->cur_altsetting->endpoint[0].desc.bInterval);
ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
if (ret < 0) {
@@ -2179,6 +2184,7 @@ static int uea_boot(struct uea_softc *sc)
sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
if (IS_ERR(sc->kthread)) {
uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
+ ret = PTR_ERR(sc->kthread);
goto err2;
}
@@ -2193,7 +2199,7 @@ err1:
kfree(intr);
err0:
uea_leaves(INS_TO_USBDEV(sc));
- return -ENOMEM;
+ return ret;
}
/*
@@ -2548,7 +2554,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
}
}
- ret = uea_boot(sc);
+ ret = uea_boot(sc, intf);
if (ret < 0)
goto error;
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index dbea28495e1d..4e12a32ca392 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1275,7 +1275,7 @@ EXPORT_SYMBOL_GPL(usbatm_usb_disconnect);
static int __init usbatm_usb_init(void)
{
- if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
+ if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) {
printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
return -EIO;
}
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 87338f9eb5be..ed204cbb63ea 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -156,7 +156,8 @@ static int usb_conn_probe(struct platform_device *pdev)
info->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(info->vbus)) {
- dev_err(dev, "failed to get vbus\n");
+ if (PTR_ERR(info->vbus) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get vbus\n");
return PTR_ERR(info->vbus);
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 281568d464f9..aa45840d8273 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1409,7 +1409,17 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
- if (hcd_uses_dma(hcd)) {
+ if (hcd->localmem_pool) {
+ ret = hcd_alloc_coherent(
+ urb->dev->bus, mem_flags,
+ &urb->setup_dma,
+ (void **)&urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
+ } else if (hcd_uses_dma(hcd)) {
if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
@@ -1424,23 +1434,22 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
urb->setup_dma))
return -EAGAIN;
urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
- } else if (hcd->localmem_pool) {
- ret = hcd_alloc_coherent(
- urb->dev->bus, mem_flags,
- &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- if (ret)
- return ret;
- urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
}
}
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd_uses_dma(hcd)) {
+ if (hcd->localmem_pool) {
+ ret = hcd_alloc_coherent(
+ urb->dev->bus, mem_flags,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
+ } else if (hcd_uses_dma(hcd)) {
if (urb->num_sgs) {
int n;
@@ -1491,15 +1500,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
else
urb->transfer_flags |= URB_DMA_MAP_SINGLE;
}
- } else if (hcd->localmem_pool) {
- ret = hcd_alloc_coherent(
- urb->dev->bus, mem_flags,
- &urb->transfer_dma,
- &urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- if (ret == 0)
- urb->transfer_flags |= URB_MAP_LOCAL;
}
if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
URB_SETUP_MAP_LOCAL)))
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 0eab79f82ce4..da923ec17612 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -45,6 +45,7 @@ void usb_init_urb(struct urb *urb)
if (urb) {
memset(urb, 0, sizeof(*urb));
kref_init(&urb->kref);
+ INIT_LIST_HEAD(&urb->urb_list);
INIT_LIST_HEAD(&urb->anchor_list);
}
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 023f0357efd7..294276f7deb9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,7 +29,8 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
-#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
+#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee
+#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -308,6 +309,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3996b9c4ff8d..fd1b100d2927 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1117,6 +1117,9 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+ u8 cmd;
+
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_ep0_xfer_complete(dwc, event);
@@ -1129,7 +1132,12 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
case DWC3_DEPEVT_XFERINPROGRESS:
case DWC3_DEPEVT_RXTXFIFOEVT:
case DWC3_DEPEVT_STREAMEVT:
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
+ cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a9aba716bf80..0c960a97ea02 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2491,7 +2491,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req) &&
+ if (!dwc3_gadget_ep_request_completed(req) ||
req->num_pending_sgs) {
__dwc3_gadget_kick_transfer(dep);
goto out;
@@ -2719,6 +2719,9 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+ if (!interrupt)
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+
if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
udelay(100);
}
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 6ce044008cf6..460d5d7c984f 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f)
DBG(cdev, "ecm deactivated\n");
- if (ecm->port.in_ep->enabled)
+ if (ecm->port.in_ep->enabled) {
gether_disconnect(&ecm->port);
+ } else {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ }
usb_ep_disable(ecm->notify);
ecm->notify->desc = NULL;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ce1d0235969c..0bbccac94d6c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3509,7 +3509,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name))
+ if (strlen(name) >= sizeof_field(struct ffs_dev, name))
return -ENAMETOOLONG;
return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
}
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index d48df36622b7..0d8e4a364ca6 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f)
gether_disconnect(&rndis->port);
usb_ep_disable(rndis->notify);
+ rndis->notify->desc = NULL;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index b7d23c438756..7a3a29e5e9d2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -806,7 +806,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
u32 *status, u32 portsc,
- unsigned long flags)
+ unsigned long *flags)
{
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
@@ -860,11 +860,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irqrestore(&xhci->lock, *flags);
time_left = wait_for_completion_timeout(
&bus_state->rexit_done[wIndex],
msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS));
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
@@ -920,11 +920,13 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
{
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
+ struct usb_hcd *hcd;
u32 link_state;
u32 portnum;
bus_state = &port->rhub->bus_state;
xhci = hcd_to_xhci(port->rhub->hcd);
+ hcd = port->rhub->hcd;
link_state = portsc & PORT_PLS_MASK;
portnum = port->hcd_portnum;
@@ -952,12 +954,20 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
bus_state->suspended_ports &= ~(1 << portnum);
}
+ /* remote wake resume signaling complete */
+ if (bus_state->port_remote_wakeup & (1 << portnum) &&
+ link_state != XDEV_RESUME &&
+ link_state != XDEV_RECOVERY) {
+ bus_state->port_remote_wakeup &= ~(1 << portnum);
+ usb_hcd_end_port_resume(&hcd->self, portnum);
+ }
+
xhci_hub_report_usb3_link_state(xhci, status, portsc);
xhci_del_comp_mod_timer(xhci, portsc, portnum);
}
static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
- u32 portsc, unsigned long flags)
+ u32 portsc, unsigned long *flags)
{
struct xhci_bus_state *bus_state;
u32 link_state;
@@ -1007,7 +1017,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_bus_state *bus_state,
u16 wIndex, u32 raw_port_status,
- unsigned long flags)
+ unsigned long *flags)
__releases(&xhci->lock)
__acquires(&xhci->lock)
{
@@ -1130,7 +1140,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
trace_xhci_get_port_status(wIndex, temp);
status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
- flags);
+ &flags);
if (status == 0xffffffff)
goto error;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index e16eda6e2b8b..3b1388fa2f36 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1909,13 +1909,17 @@ no_bw:
xhci->usb3_rhub.num_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_rhub.ports);
+ kfree(xhci->usb2_rhub.psi);
kfree(xhci->usb3_rhub.ports);
+ kfree(xhci->usb3_rhub.psi);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
xhci->usb2_rhub.ports = NULL;
+ xhci->usb2_rhub.psi = NULL;
xhci->usb3_rhub.ports = NULL;
+ xhci->usb3_rhub.psi = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index a0025d23b257..2907fe4d78dd 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -521,6 +521,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
}
#endif /* CONFIG_PM */
+static void xhci_pci_shutdown(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ xhci_shutdown(hcd);
+
+ /* Yet another workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
/*-------------------------------------------------------------------------*/
/* PCI driver selection metadata; PCI hotplugging uses this */
@@ -556,6 +568,7 @@ static int __init xhci_pci_init(void)
#ifdef CONFIG_PM
xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend;
xhci_pci_hc_driver.pci_resume = xhci_pci_resume;
+ xhci_pci_hc_driver.shutdown = xhci_pci_shutdown;
#endif
return pci_register_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6475c3d3b43b..d23f7408c81f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1628,7 +1628,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
if (slot_id && xhci->devs[slot_id])
xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
- bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1648,6 +1647,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
*/
bus_state->port_remote_wakeup |= 1 << hcd_portnum;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+ usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
xhci_set_link_state(xhci, port, XDEV_U0);
/* Need to wait until the next link state change
* indicates the device is actually in U0.
@@ -1688,7 +1688,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
- bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
hcd_portnum + 1);
@@ -2382,7 +2381,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
break;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
+ ep_ring->last_td_was_short)
trb_comp_code = COMP_SHORT_PACKET;
else
xhci_warn_ratelimited(xhci,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6721d059f58a..dbac0fa9748d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -770,7 +770,7 @@ static void xhci_stop(struct usb_hcd *hcd)
*
* This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
-static void xhci_shutdown(struct usb_hcd *hcd)
+void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -789,11 +789,8 @@ static void xhci_shutdown(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_shutdown completed - status = %x",
readl(&xhci->op_regs->status));
-
- /* Yet another workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
- pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}
+EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
@@ -973,7 +970,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
int rc = 0;
- unsigned int delay = XHCI_MAX_HALT_USEC;
+ unsigned int delay = XHCI_MAX_HALT_USEC * 2;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
u32 res;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index dc6f62a4b197..13d8838cd552 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2050,6 +2050,7 @@ int xhci_start(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
int xhci_run(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 6f5edb9fc61e..d8d157c4c271 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -669,7 +669,7 @@ static int adu_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- res = usb_find_common_endpoints_reverse(&interface->altsetting[0],
+ res = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
&dev->interrupt_in_endpoint,
&dev->interrupt_out_endpoint);
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 4afb5ddfd361..e9437a176518 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -322,7 +322,7 @@ static int idmouse_probe(struct usb_interface *interface,
int result;
/* check if we have gotten the data or the hid interface */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ac2b4fcc265f..f48a23adbc35 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
- mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
- kfree(rp->b_vec);
- rp->b_vec = vec;
- rp->b_size = size;
- rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
- rp->cnt_lost = 0;
+ if (rp->mmap_active) {
+ mon_free_buff(vec, size/CHUNK_SIZE);
+ kfree(vec);
+ ret = -EBUSY;
+ } else {
+ mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+ kfree(rp->b_vec);
+ rp->b_vec = vec;
+ rp->b_size = size;
+ rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
+ rp->cnt_lost = 0;
+ }
spin_unlock_irqrestore(&rp->b_lock, flags);
mutex_unlock(&rp->fetch_lock);
}
@@ -1216,13 +1222,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait)
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
struct mon_reader_bin *rp = vma->vm_private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active++;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
static void mon_bin_vma_close(struct vm_area_struct *vma)
{
+ unsigned long flags;
+
struct mon_reader_bin *rp = vma->vm_private_data;
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active--;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
/*
@@ -1234,16 +1248,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
- mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size) {
- mutex_unlock(&rp->fetch_lock);
+ if (offset >= rp->b_size)
return VM_FAULT_SIGBUS;
- }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
- mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 8273126ffdf4..63a00ff26655 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -169,8 +169,8 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- put_device(&sw->dev);
module_put(sw->dev.parent->driver->owner);
+ put_device(&sw->dev);
}
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 48a439298a68..9690a5f4b9d6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2901,16 +2901,18 @@ static int edge_startup(struct usb_serial *serial)
response = 0;
if (edge_serial->is_epic) {
+ struct usb_host_interface *alt;
+
+ alt = serial->interface->cur_altsetting;
+
/* EPIC thing, set up our interrupt polling now and our read
* urb, so that the device knows it really is connected. */
interrupt_in_found = bulk_in_found = bulk_out_found = false;
- for (i = 0; i < serial->interface->altsetting[0]
- .desc.bNumEndpoints; ++i) {
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
struct usb_endpoint_descriptor *endpoint;
int buffer_size;
- endpoint = &serial->interface->altsetting[0].
- endpoint[i].desc;
+ endpoint = &alt->endpoint[i].desc;
buffer_size = usb_endpoint_maxp(endpoint);
if (!interrupt_in_found &&
(usb_endpoint_is_int_in(endpoint))) {
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 66a4dcbbb1fc..f4c2359abb1b 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -135,7 +135,8 @@ static int slave_configure(struct scsi_device *sdev)
* For such controllers we need to make sure the block layer sets
* up bounce buffers in addressable memory.
*/
- if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)))
+ if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
+ (bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL))
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
/*
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 7ece6ca6e690..91d62276b56f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1612,14 +1612,16 @@ struct typec_port *typec_register_port(struct device *parent,
port->sw = typec_switch_get(&port->dev);
if (IS_ERR(port->sw)) {
+ ret = PTR_ERR(port->sw);
put_device(&port->dev);
- return ERR_CAST(port->sw);
+ return ERR_PTR(ret);
}
port->mux = typec_mux_get(&port->dev, NULL);
if (IS_ERR(port->mux)) {
+ ret = PTR_ERR(port->mux);
put_device(&port->dev);
- return ERR_CAST(port->mux);
+ return ERR_PTR(ret);
}
ret = device_add(&port->dev);
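The typec hunk above copies PTR_ERR() into ret before calling put_device(), because dropping the last reference can free the port that ERR_CAST(port->sw)/ERR_CAST(port->mux) would still have to read. The same ordering applies whenever an error detail lives inside an object that is about to be released; a small standalone C sketch with made-up names (struct box, box_put):

#include <stdio.h>
#include <stdlib.h>

struct box {
	int refcount;
	int last_error;		/* error detail stored inside the object */
};

static void box_put(struct box *b)
{
	if (--b->refcount == 0)
		free(b);	/* the object may vanish here */
}

static int setup(struct box *b)
{
	b->last_error = -22;	/* pretend a sub-step failed */

	/* Copy the error out *before* dropping the reference ... */
	int err = b->last_error;

	box_put(b);
	return err;		/* ... so freed memory is never read. */
}

int main(void)
{
	struct box *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->refcount = 1;
	printf("setup() = %d\n", setup(b));
	return 0;
}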
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index e05679c478e2..93f995f6cf36 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -32,10 +32,11 @@
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1)
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
/* The size of a free page block in bytes */
-#define VIRTIO_BALLOON_FREE_PAGE_SIZE \
- (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT))
+#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
+ (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
+#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
@@ -380,7 +381,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
if (!page)
break;
free_pages((unsigned long)page_address(page),
- VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
vb->num_free_page_blocks -= num_returned;
spin_unlock_irq(&vb->free_page_list_lock);
@@ -582,7 +583,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
;
page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
- VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ VIRTIO_BALLOON_HINT_BLOCK_ORDER);
/*
* When the allocation returns NULL, it indicates that we have got all
* the possible free pages, so return -EINTR to stop.
@@ -591,13 +592,13 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
return -EINTR;
p = page_address(page);
- sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
+ sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
/* There is always 1 entry reserved for the cmd id to use. */
if (vq->num_free > 1) {
err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
if (unlikely(err)) {
free_pages((unsigned long)p,
- VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ VIRTIO_BALLOON_HINT_BLOCK_ORDER);
return err;
}
virtqueue_kick(vq);
@@ -610,7 +611,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
* The vq has no available entry to add this page block, so
* just free it.
*/
- free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
return 0;
@@ -721,6 +722,17 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
get_page(newpage); /* balloon reference */
+ /*
+ * When we migrate a page to a different zone and adjusted the
+ * managed page count when inflating, we have to fixup the count of
+ * both involved zones.
+ */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
+ page_zone(page) != page_zone(newpage)) {
+ adjust_managed_page_count(page, 1);
+ adjust_managed_page_count(newpage, -1);
+ }
+
/* balloon's page migration 1st step -- inflate "newpage" */
spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
balloon_page_insert(vb_dev_info, newpage);
@@ -765,11 +777,11 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
unsigned long blocks_to_free, blocks_freed;
pages_to_free = round_up(pages_to_free,
- 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER);
- blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ VIRTIO_BALLOON_HINT_BLOCK_PAGES);
+ blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);
- return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}
static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
@@ -826,7 +838,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
unsigned long count;
count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
return count;
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4f2e78a5e4db..0c142bcab79d 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -394,7 +394,8 @@ static struct notifier_block xen_memory_nb = {
#else
static enum bp_state reserve_additional_memory(void)
{
- balloon_stats.target_pages = balloon_stats.current_pages;
+ balloon_stats.target_pages = balloon_stats.current_pages +
+ balloon_stats.target_unpopulated;
return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 4150280509ff..7503899c0a1b 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -136,6 +136,9 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
ASSERTCMP(d_inode(dentry), ==, NULL);
+ if (flags & LOOKUP_CREATE)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (dentry->d_name.len >= AFSNAMEMAX) {
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index f532d6d3bd28..79bc5f1338ed 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -126,7 +126,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (src_as->cell)
ctx->cell = afs_get_cell(src_as->cell);
- if (size > PAGE_SIZE - 1)
+ if (size < 2 || size > PAGE_SIZE - 1)
return -EINVAL;
page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
@@ -140,7 +140,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
}
buf = kmap(page);
- ret = vfs_parse_fs_string(fc, "source", buf, size);
+ ret = -EINVAL;
+ if (buf[size - 1] == '.')
+ ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
kunmap(page);
put_page(page);
if (ret < 0)
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index fba2ec3a3a9c..468e1713bce1 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -213,13 +213,14 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
/* Display header on line 1 */
if (v == &cell->proc_volumes) {
- seq_puts(m, "USE VID TY\n");
+ seq_puts(m, "USE VID TY NAME\n");
return 0;
}
- seq_printf(m, "%3d %08llx %s\n",
+ seq_printf(m, "%3d %08llx %s %s\n",
atomic_read(&vol->usage), vol->vid,
- afs_vol_types[vol->type]);
+ afs_vol_types[vol->type],
+ vol->name);
return 0;
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 1686bf188ccd..b7f3cb2130ca 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -32,18 +32,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net)
struct afs_server *afs_find_server(struct afs_net *net,
const struct sockaddr_rxrpc *srx)
{
- const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
const struct afs_addr_list *alist;
struct afs_server *server = NULL;
unsigned int i;
- bool ipv6 = true;
int seq = 0, diff;
- if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
- srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
- srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
- ipv6 = false;
-
rcu_read_lock();
do {
@@ -52,7 +45,8 @@ struct afs_server *afs_find_server(struct afs_net *net,
server = NULL;
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
- if (ipv6) {
+ if (srx->transport.family == AF_INET6) {
+ const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
alist = rcu_dereference(server->addresses);
for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
@@ -68,15 +62,16 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
}
} else {
+ const struct sockaddr_in *a = &srx->transport.sin, *b;
hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
alist = rcu_dereference(server->addresses);
for (i = 0; i < alist->nr_ipv4; i++) {
- b = &alist->addrs[i].transport.sin6;
- diff = ((u16 __force)a->sin6_port -
- (u16 __force)b->sin6_port);
+ b = &alist->addrs[i].transport.sin;
+ diff = ((u16 __force)a->sin_port -
+ (u16 __force)b->sin_port);
if (diff == 0)
- diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
- (u32 __force)b->sin6_addr.s6_addr32[3]);
+ diff = ((u32 __force)a->sin_addr.s_addr -
+ (u32 __force)b->sin_addr.s_addr);
if (diff == 0)
goto found;
}
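The afs_find_server() hunk above stops inferring the address family from the IPv6 address words and instead branches on transport.family, comparing sockaddr_in fields directly for IPv4. A standalone sketch of the same family-based dispatch using plain BSD socket types (same_addr and the test addresses are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Compare two addresses by branching on the stored family rather than
 * guessing it from the byte pattern. */
static int same_addr(const struct sockaddr_storage *x,
		     const struct sockaddr_storage *y)
{
	if (x->ss_family != y->ss_family)
		return 0;

	if (x->ss_family == AF_INET6) {
		const struct sockaddr_in6 *a = (const void *)x;
		const struct sockaddr_in6 *b = (const void *)y;

		return a->sin6_port == b->sin6_port &&
		       !memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr));
	} else {
		const struct sockaddr_in *a = (const void *)x;
		const struct sockaddr_in *b = (const void *)y;

		return a->sin_port == b->sin_port &&
		       a->sin_addr.s_addr == b->sin_addr.s_addr;
	}
}

int main(void)
{
	struct sockaddr_storage a = { 0 }, b = { 0 };
	struct sockaddr_in *a4 = (void *)&a, *b4 = (void *)&b;

	a4->sin_family = b4->sin_family = AF_INET;
	a4->sin_port = b4->sin_port = htons(7001);
	inet_pton(AF_INET, "203.0.113.7", &a4->sin_addr);
	inet_pton(AF_INET, "203.0.113.7", &b4->sin_addr);

	printf("%d\n", same_addr(&a, &b));	/* 1 */
	return 0;
}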
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 488641b1a418..7f8a9b3137bf 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -404,6 +404,7 @@ static int afs_test_super(struct super_block *sb, struct fs_context *fc)
return (as->net_ns == fc->net_ns &&
as->volume &&
as->volume->vid == ctx->volume->vid &&
+ as->cell == ctx->cell &&
!as->dyn_root);
}
@@ -448,7 +449,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* allocate the root inode and dentry */
if (as->dyn_root) {
inode = afs_iget_pseudo_dir(sb, true);
- sb->s_flags |= SB_RDONLY;
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 75b6d10c9845..575636f6491e 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -7,6 +7,7 @@ config BTRFS_FS
select LIBCRC32C
select CRYPTO_XXHASH
select CRYPTO_SHA256
+ select CRYPTO_BLAKE2B
select ZLIB_INFLATE
select ZLIB_DEFLATE
select LZO_COMPRESS
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f5a38910a82b..9d09bb53c1ab 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1011,18 +1011,13 @@ static int __ceph_is_single_caps(struct ceph_inode_info *ci)
return rb_first(&ci->i_caps) == rb_last(&ci->i_caps);
}
-static int __ceph_is_any_caps(struct ceph_inode_info *ci)
-{
- return !RB_EMPTY_ROOT(&ci->i_caps);
-}
-
int ceph_is_any_caps(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int ret;
spin_lock(&ci->i_ceph_lock);
- ret = __ceph_is_any_caps(ci);
+ ret = __ceph_is_any_real_caps(ci);
spin_unlock(&ci->i_ceph_lock);
return ret;
@@ -1099,15 +1094,16 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
if (removed)
ceph_put_cap(mdsc, cap);
- /* when reconnect denied, we remove session caps forcibly,
- * i_wr_ref can be non-zero. If there are ongoing write,
- * keep i_snap_realm.
- */
- if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
- drop_inode_snap_realm(ci);
+ if (!__ceph_is_any_real_caps(ci)) {
+ /* when reconnect denied, we remove session caps forcibly,
+ * i_wr_ref can be non-zero. If there are ongoing write,
+ * keep i_snap_realm.
+ */
+ if (ci->i_wr_ref == 0 && ci->i_snap_realm)
+ drop_inode_snap_realm(ci);
- if (!__ceph_is_any_real_caps(ci))
__cap_delay_cancel(mdsc, ci);
+ }
}
struct cap_msg_args {
@@ -2764,7 +2760,19 @@ int ceph_get_caps(struct file *filp, int need, int want,
if (ret == -EAGAIN)
continue;
if (!ret) {
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct cap_wait cw;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ cw.ino = inode->i_ino;
+ cw.tgid = current->tgid;
+ cw.need = need;
+ cw.want = want;
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_add(&cw.list, &mdsc->cap_wait_list);
+ spin_unlock(&mdsc->caps_list_lock);
+
add_wait_queue(&ci->i_cap_wq, &wait);
flags |= NON_BLOCKING;
@@ -2778,6 +2786,11 @@ int ceph_get_caps(struct file *filp, int need, int want,
}
remove_wait_queue(&ci->i_cap_wq, &wait);
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_del(&cw.list);
+ spin_unlock(&mdsc->caps_list_lock);
+
if (ret == -EAGAIN)
continue;
}
@@ -2928,7 +2941,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
ci->i_head_snapc = NULL;
}
/* see comment in __ceph_remove_cap() */
- if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
+ if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
drop_inode_snap_realm(ci);
}
spin_unlock(&ci->i_ceph_lock);
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index facb387c2735..c281f32b54f7 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -139,6 +139,7 @@ static int caps_show(struct seq_file *s, void *p)
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
int total, avail, used, reserved, min, i;
+ struct cap_wait *cw;
ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
@@ -166,6 +167,18 @@ static int caps_show(struct seq_file *s, void *p)
}
mutex_unlock(&mdsc->mutex);
+ seq_printf(s, "\n\nWaiters:\n--------\n");
+ seq_printf(s, "tgid ino need want\n");
+ seq_printf(s, "-----------------------------------------------------\n");
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_for_each_entry(cw, &mdsc->cap_wait_list, list) {
+ seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino,
+ ceph_cap_string(cw->need),
+ ceph_cap_string(cw->want));
+ }
+ spin_unlock(&mdsc->caps_list_lock);
+
return 0;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 068b029cf073..374db1bd57d1 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2015,7 +2015,7 @@ void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
if (!nr)
return;
val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
- if (!(val % CEPH_CAPS_PER_RELEASE)) {
+ if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
atomic_set(&mdsc->cap_reclaim_pending, 0);
ceph_queue_cap_reclaim_work(mdsc);
}
@@ -2032,12 +2032,13 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
size_t size = sizeof(struct ceph_mds_reply_dir_entry);
- int order, num_entries;
+ unsigned int num_entries;
+ int order;
spin_lock(&ci->i_ceph_lock);
num_entries = ci->i_files + ci->i_subdirs;
spin_unlock(&ci->i_ceph_lock);
- num_entries = max(num_entries, 1);
+ num_entries = max(num_entries, 1U);
num_entries = min(num_entries, opt->max_readdir);
order = get_order(size * num_entries);
@@ -4168,6 +4169,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
+ INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 5cd131b41d84..14c7e8c49970 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -340,6 +340,14 @@ struct ceph_quotarealm_inode {
struct inode *inode;
};
+struct cap_wait {
+ struct list_head list;
+ unsigned long ino;
+ pid_t tgid;
+ int need;
+ int want;
+};
+
/*
* mds client state
*/
@@ -416,6 +424,7 @@ struct ceph_mds_client {
spinlock_t caps_list_lock;
struct list_head caps_list; /* unused (reserved or
unreserved) */
+ struct list_head cap_wait_list;
int caps_total_count; /* total caps allocated */
int caps_use_count; /* in use */
int caps_use_max; /* max used caps */
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index aeec1d6e3769..471bac335fae 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -158,6 +158,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
void *pexport_targets = NULL;
struct ceph_timespec laggy_since;
struct ceph_mds_info *info;
+ bool laggy;
ceph_decode_need(p, end, sizeof(u64) + 1, bad);
global_id = ceph_decode_64(p);
@@ -190,6 +191,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
if (err)
goto corrupt;
ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
+ laggy = laggy_since.tv_sec != 0 || laggy_since.tv_nsec != 0;
*p += sizeof(u32);
ceph_decode_32_safe(p, end, namelen, bad);
*p += namelen;
@@ -207,10 +209,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
*p = info_end;
}
- dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
+ dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
i+1, n, global_id, mds, inc,
ceph_pr_addr(&addr),
- ceph_mds_state_name(state));
+ ceph_mds_state_name(state),
+ laggy ? "(laggy)" : "");
if (mds < 0 || state <= 0)
continue;
@@ -230,8 +233,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
info->global_id = global_id;
info->state = state;
info->addr = addr;
- info->laggy = (laggy_since.tv_sec != 0 ||
- laggy_since.tv_nsec != 0);
+ info->laggy = laggy;
info->num_export_targets = num_export_targets;
if (num_export_targets) {
info->export_targets = kcalloc(num_export_targets,
@@ -355,6 +357,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
m->m_damaged = false;
}
bad_ext:
+ dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+ !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
*p = end;
dout("mdsmap_decode success epoch %u\n", m->m_epoch);
return m;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9c9a7c68eea3..29a795f975df 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -172,10 +172,10 @@ static const struct fs_parameter_enum ceph_mount_param_enums[] = {
static const struct fs_parameter_spec ceph_mount_param_specs[] = {
fsparam_flag_no ("acl", Opt_acl),
fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
- fsparam_u32 ("caps_max", Opt_caps_max),
+ fsparam_s32 ("caps_max", Opt_caps_max),
fsparam_u32 ("caps_wanted_delay_max", Opt_caps_wanted_delay_max),
fsparam_u32 ("caps_wanted_delay_min", Opt_caps_wanted_delay_min),
- fsparam_s32 ("write_congestion_kb", Opt_congestion_kb),
+ fsparam_u32 ("write_congestion_kb", Opt_congestion_kb),
fsparam_flag_no ("copyfrom", Opt_copyfrom),
fsparam_flag_no ("dcache", Opt_dcache),
fsparam_flag_no ("dirstat", Opt_dirstat),
@@ -187,8 +187,8 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
fsparam_flag_no ("quotadf", Opt_quotadf),
fsparam_u32 ("rasize", Opt_rasize),
fsparam_flag_no ("rbytes", Opt_rbytes),
- fsparam_s32 ("readdir_max_bytes", Opt_readdir_max_bytes),
- fsparam_s32 ("readdir_max_entries", Opt_readdir_max_entries),
+ fsparam_u32 ("readdir_max_bytes", Opt_readdir_max_bytes),
+ fsparam_u32 ("readdir_max_entries", Opt_readdir_max_entries),
fsparam_enum ("recover_session", Opt_recover_session),
fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
fsparam_u32 ("rsize", Opt_rsize),
@@ -328,7 +328,9 @@ static int ceph_parse_mount_param(struct fs_context *fc,
fsopt->caps_wanted_delay_max = result.uint_32;
break;
case Opt_caps_max:
- fsopt->caps_max = result.uint_32;
+ if (result.int_32 < 0)
+ goto out_of_range;
+ fsopt->caps_max = result.int_32;
break;
case Opt_readdir_max_entries:
if (result.uint_32 < 1)
@@ -547,25 +549,25 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_show_option(m, "recover_session", "clean");
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
- seq_printf(m, ",wsize=%d", fsopt->wsize);
+ seq_printf(m, ",wsize=%u", fsopt->wsize);
if (fsopt->rsize != CEPH_MAX_READ_SIZE)
- seq_printf(m, ",rsize=%d", fsopt->rsize);
+ seq_printf(m, ",rsize=%u", fsopt->rsize);
if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
- seq_printf(m, ",rasize=%d", fsopt->rasize);
+ seq_printf(m, ",rasize=%u", fsopt->rasize);
if (fsopt->congestion_kb != default_congestion_kb())
- seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+ seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
if (fsopt->caps_max)
seq_printf(m, ",caps_max=%d", fsopt->caps_max);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
- seq_printf(m, ",caps_wanted_delay_min=%d",
+ seq_printf(m, ",caps_wanted_delay_min=%u",
fsopt->caps_wanted_delay_min);
if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
- seq_printf(m, ",caps_wanted_delay_max=%d",
+ seq_printf(m, ",caps_wanted_delay_max=%u",
fsopt->caps_wanted_delay_max);
if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
- seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
+ seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
- seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
+ seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
seq_show_option(m, "snapdirname", fsopt->snapdir_name);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index f0f9cb7447ac..3bf1a01cd536 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -73,16 +73,16 @@
#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
struct ceph_mount_options {
- int flags;
+ unsigned int flags;
- int wsize; /* max write size */
- int rsize; /* max read size */
- int rasize; /* max readahead */
- int congestion_kb; /* max writeback in flight */
- int caps_wanted_delay_min, caps_wanted_delay_max;
+ unsigned int wsize; /* max write size */
+ unsigned int rsize; /* max read size */
+ unsigned int rasize; /* max readahead */
+ unsigned int congestion_kb; /* max writeback in flight */
+ unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
int caps_max;
- int max_readdir; /* max readdir result (entires) */
- int max_readdir_bytes; /* max readdir result (bytes) */
+ unsigned int max_readdir; /* max readdir result (entries) */
+ unsigned int max_readdir_bytes; /* max readdir result (bytes) */
/*
* everything above this point can be memcmp'd; everything below
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index fd0262ce5ad5..ce9bac756c2a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1061,7 +1061,7 @@ cap_unix(struct cifs_ses *ses)
struct cached_fid {
bool is_valid:1; /* Do we have a useable root fid */
bool file_all_info_is_valid:1;
-
+ bool has_lease:1;
struct kref refcount;
struct cifs_fid *fid;
struct mutex fid_mutex;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4f554f019a98..cc86a67225d1 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -42,6 +42,7 @@
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
+#include "smb2proto.h"
#include "fscache.h"
#include "smbdirect.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -112,6 +113,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
mutex_lock(&tcon->crfid.fid_mutex);
tcon->crfid.is_valid = false;
+ /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
+ close_shroot_lease_locked(&tcon->crfid);
memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
mutex_unlock(&tcon->crfid.fid_mutex);
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 18c7a33adceb..5ef5e97a6d13 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -95,6 +95,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
goto finished;
}
+ memset(&oparms, 0, sizeof(struct cifs_open_parms));
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = create_disposition;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a5c96bc522cb..6250370c1170 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -616,6 +616,7 @@ smb2_close_cached_fid(struct kref *ref)
cfid->fid->volatile_fid);
cfid->is_valid = false;
cfid->file_all_info_is_valid = false;
+ cfid->has_lease = false;
}
}
@@ -626,13 +627,28 @@ void close_shroot(struct cached_fid *cfid)
mutex_unlock(&cfid->fid_mutex);
}
+void close_shroot_lease_locked(struct cached_fid *cfid)
+{
+ if (cfid->has_lease) {
+ cfid->has_lease = false;
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+}
+
+void close_shroot_lease(struct cached_fid *cfid)
+{
+ mutex_lock(&cfid->fid_mutex);
+ close_shroot_lease_locked(cfid);
+ mutex_unlock(&cfid->fid_mutex);
+}
+
void
smb2_cached_lease_break(struct work_struct *work)
{
struct cached_fid *cfid = container_of(work,
struct cached_fid, lease_break);
- close_shroot(cfid);
+ close_shroot_lease(cfid);
}
/*
@@ -773,6 +789,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
/* BB TBD check to see if oplock level check can be removed below */
if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
kref_get(&tcon->crfid.refcount);
+ tcon->crfid.has_lease = true;
smb2_parse_contexts(server, o_rsp,
&oparms.fid->epoch,
oparms.fid->lease_key, &oplock, NULL);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0ab6b1200288..9434f6dd8df3 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1847,7 +1847,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
- close_shroot(&tcon->crfid);
+ close_shroot_lease(&tcon->crfid);
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
&total_len);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index a18272c987fe..27d29f2eb6c8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -70,6 +70,8 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *pfid);
extern void close_shroot(struct cached_fid *cfid);
+extern void close_shroot_lease(struct cached_fid *cfid);
+extern void close_shroot_lease_locked(struct cached_fid *cfid);
extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
struct smb2_file_all_info *src);
extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 040df1f5e1c8..40cca351273f 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -151,7 +151,7 @@ static struct key *search_fscrypt_keyring(struct key *keyring,
}
#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \
- (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id))
+ (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id))
#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index a13a78725c57..b766c3ee5fa8 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -649,6 +649,8 @@ ssize_t erofs_listxattr(struct dentry *dentry,
struct listxattr_iter it;
ret = init_inode_xattrs(d_inode(dentry));
+ if (ret == -ENOATTR)
+ return 0;
if (ret)
return ret;
diff --git a/fs/file.c b/fs/file.c
index 3da91a112bab..2f4fcf985079 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -960,7 +960,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
return ksys_dup3(oldfd, newfd, 0);
}
-int ksys_dup(unsigned int fildes)
+SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
int ret = -EBADF;
struct file *file = fget_raw(fildes);
@@ -975,11 +975,6 @@ int ksys_dup(unsigned int fildes)
return ret;
}
-SYSCALL_DEFINE1(dup, unsigned int, fildes)
-{
- return ksys_dup(fildes);
-}
-
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
int err;
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 74b40506c5d9..90c4978781fb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -49,7 +49,6 @@ struct io_worker {
struct hlist_nulls_node nulls_node;
struct list_head all_list;
struct task_struct *task;
- wait_queue_head_t wait;
struct io_wqe *wqe;
struct io_wq_work *cur_work;
@@ -258,7 +257,7 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
if (io_worker_get(worker)) {
- wake_up(&worker->wait);
+ wake_up_process(worker->task);
io_worker_release(worker);
return true;
}
@@ -492,28 +491,46 @@ next:
} while (1);
}
+static inline void io_worker_spin_for_work(struct io_wqe *wqe)
+{
+ int i = 0;
+
+ while (++i < 1000) {
+ if (io_wqe_run_queue(wqe))
+ break;
+ if (need_resched())
+ break;
+ cpu_relax();
+ }
+}
+
static int io_wqe_worker(void *data)
{
struct io_worker *worker = data;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
- DEFINE_WAIT(wait);
+ bool did_work;
io_worker_start(wqe, worker);
+ did_work = false;
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
- prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
-
+ set_current_state(TASK_INTERRUPTIBLE);
+loop:
+ if (did_work)
+ io_worker_spin_for_work(wqe);
spin_lock_irq(&wqe->lock);
if (io_wqe_run_queue(wqe)) {
__set_current_state(TASK_RUNNING);
io_worker_handle_work(worker);
- continue;
+ did_work = true;
+ goto loop;
}
+ did_work = false;
/* drops the lock on success, retry */
if (__io_worker_idle(wqe, worker)) {
__release(&wqe->lock);
- continue;
+ goto loop;
}
spin_unlock_irq(&wqe->lock);
if (signal_pending(current))
@@ -526,8 +543,6 @@ static int io_wqe_worker(void *data)
break;
}
- finish_wait(&worker->wait, &wait);
-
if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
spin_lock_irq(&wqe->lock);
if (!wq_list_empty(&wqe->work_list))
@@ -589,7 +604,6 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
refcount_set(&worker->ref, 1);
worker->nulls_node.pprev = NULL;
- init_waitqueue_head(&worker->wait);
worker->wqe = wqe;
spin_lock_init(&worker->lock);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 7c333a28e2a7..fb993b2bd0ef 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -35,7 +35,8 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
struct io_wq_work_list *list)
{
if (!list->first) {
- list->first = list->last = node;
+ list->last = node;
+ WRITE_ONCE(list->first, node);
} else {
list->last->next = node;
list->last = node;
@@ -47,7 +48,7 @@ static inline void wq_node_del(struct io_wq_work_list *list,
struct io_wq_work_node *prev)
{
if (node == list->first)
- list->first = node->next;
+ WRITE_ONCE(list->first, node->next);
if (node == list->last)
list->last = prev;
if (prev)
@@ -58,7 +59,7 @@ static inline void wq_node_del(struct io_wq_work_list *list,
#define wq_list_for_each(pos, prv, head) \
for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
-#define wq_list_empty(list) ((list)->first == NULL)
+#define wq_list_empty(list) (READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list) do { \
(list)->first = NULL; \
(list)->last = NULL; \
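The io-wq.h hunk above pairs WRITE_ONCE() on list->first with READ_ONCE() in wq_list_empty(), presumably so a peek at the head taken without the wqe lock is neither torn nor optimized away by the compiler. A rough userspace sketch of the idiom, assuming simplified volatile-cast stand-ins for the kernel macros and GCC-style __typeof__ (the real macros in <linux/compiler.h> handle more cases):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins: force the compiler to emit exactly one access. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))

struct node { struct node *next; };
struct list { struct node *first, *last; };

static void list_add_tail(struct list *l, struct node *n)
{
	n->next = NULL;
	if (!l->first) {
		l->last = n;
		WRITE_ONCE(l->first, n);	/* publish the head explicitly */
	} else {
		l->last->next = n;
		l->last = n;
	}
}

/* Mirrors wq_list_empty(): intended to be callable without the list lock. */
static int list_empty(const struct list *l)
{
	return READ_ONCE(l->first) == NULL;
}

int main(void)
{
	struct list l = { NULL, NULL };
	struct node n;

	printf("%d\n", list_empty(&l));	/* 1 */
	list_add_tail(&l, &n);
	printf("%d\n", list_empty(&l));	/* 0 */
	return 0;
}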
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 405be10da73d..9b1833fedc5c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -293,7 +293,7 @@ struct io_poll_iocb {
__poll_t events;
bool done;
bool canceled;
- struct wait_queue_entry *wait;
+ struct wait_queue_entry wait;
};
struct io_timeout_data {
@@ -377,6 +377,7 @@ struct io_kiocb {
#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
#define REQ_F_INFLIGHT 16384 /* on inflight list */
#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
+#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
u64 user_data;
u32 result;
u32 sequence;
@@ -580,7 +581,9 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
switch (req->sqe->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
- do_hashed = true;
+ /* only regular files should be hashed for writes */
+ if (req->flags & REQ_F_ISREG)
+ do_hashed = true;
/* fall-through */
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
@@ -1292,6 +1295,12 @@ static void kiocb_end_write(struct io_kiocb *req)
file_end_write(req->file);
}
+static inline void req_set_fail_links(struct io_kiocb *req)
+{
+ if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+}
+
static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
@@ -1299,8 +1308,8 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
- if ((req->flags & REQ_F_LINK) && res != req->result)
- req->flags |= REQ_F_FAIL_LINK;
+ if (res != req->result)
+ req_set_fail_links(req);
io_cqring_add_event(req, res);
}
@@ -1330,8 +1339,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
- if ((req->flags & REQ_F_LINK) && res != req->result)
- req->flags |= REQ_F_FAIL_LINK;
+ if (res != req->result)
+ req_set_fail_links(req);
req->result = res;
if (res != -EAGAIN)
req->flags |= REQ_F_IOPOLL_COMPLETED;
@@ -1422,7 +1431,7 @@ static bool io_file_supports_async(struct file *file)
{
umode_t mode = file_inode(file)->i_mode;
- if (S_ISBLK(mode) || S_ISCHR(mode))
+ if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
return true;
if (S_ISREG(mode) && file->f_op != &io_uring_fops)
return true;
@@ -1858,7 +1867,9 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
goto copy_iov;
}
- if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
+ /* file path doesn't support NOWAIT for non-direct_IO */
+ if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
+ (req->flags & REQ_F_ISREG))
goto copy_iov;
iov_count = iov_iter_count(&iter);
@@ -1956,8 +1967,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
end > 0 ? end : LLONG_MAX,
fsync_flags & IORING_FSYNC_DATASYNC);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
@@ -2003,8 +2014,8 @@ static int io_sync_file_range(struct io_kiocb *req,
ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
@@ -2019,6 +2030,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
flags = READ_ONCE(sqe->msg_flags);
msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
+ io->msg.iov = io->msg.fast_iov;
return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov);
#else
return 0;
@@ -2054,7 +2066,6 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} else {
kmsg = &io.msg.msg;
kmsg->msg_name = &addr;
- io.msg.iov = io.msg.fast_iov;
ret = io_sendmsg_prep(req, &io);
if (ret)
goto out;
@@ -2079,8 +2090,8 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
out:
io_cqring_add_event(req, ret);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req_find_next(req, nxt);
return 0;
#else
@@ -2097,6 +2108,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
flags = READ_ONCE(sqe->msg_flags);
msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
+ io->msg.iov = io->msg.fast_iov;
return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr,
&io->msg.iov);
#else
@@ -2136,7 +2148,6 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} else {
kmsg = &io.msg.msg;
kmsg->msg_name = &addr;
- io.msg.iov = io.msg.fast_iov;
ret = io_recvmsg_prep(req, &io);
if (ret)
goto out;
@@ -2161,8 +2172,8 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
out:
io_cqring_add_event(req, ret);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req_find_next(req, nxt);
return 0;
#else
@@ -2196,8 +2207,8 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
@@ -2263,8 +2274,8 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret == -ERESTARTSYS)
ret = -EINTR;
out:
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
@@ -2279,8 +2290,8 @@ static void io_poll_remove_one(struct io_kiocb *req)
spin_lock(&poll->head->lock);
WRITE_ONCE(poll->canceled, true);
- if (!list_empty(&poll->wait->entry)) {
- list_del_init(&poll->wait->entry);
+ if (!list_empty(&poll->wait.entry)) {
+ list_del_init(&poll->wait.entry);
io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
@@ -2340,8 +2351,8 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req, ret);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req(req);
return 0;
}
@@ -2351,7 +2362,6 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
struct io_ring_ctx *ctx = req->ctx;
req->poll.done = true;
- kfree(req->poll.wait);
if (error)
io_cqring_fill_event(req, error);
else
@@ -2389,7 +2399,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
*/
spin_lock_irq(&ctx->completion_lock);
if (!mask && ret != -ECANCELED) {
- add_wait_queue(poll->head, poll->wait);
+ add_wait_queue(poll->head, &poll->wait);
spin_unlock_irq(&ctx->completion_lock);
return;
}
@@ -2399,8 +2409,8 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
io_cqring_ev_posted(ctx);
- if (ret < 0 && req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req_find_next(req, &nxt);
if (nxt)
*workptr = &nxt->work;
@@ -2419,7 +2429,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & poll->events))
return 0;
- list_del_init(&poll->wait->entry);
+ list_del_init(&poll->wait.entry);
/*
* Run completion inline if we can. We're using trylock here because
@@ -2460,7 +2470,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
pt->error = 0;
pt->req->poll.head = head;
- add_wait_queue(head, pt->req->poll.wait);
+ add_wait_queue(head, &pt->req->poll.wait);
}
static void io_poll_req_insert(struct io_kiocb *req)
@@ -2489,10 +2499,6 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (!poll->file)
return -EBADF;
- poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
- if (!poll->wait)
- return -ENOMEM;
-
req->io = NULL;
INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
@@ -2509,9 +2515,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 	/* initialize the list so that we can do list_empty checks */
- INIT_LIST_HEAD(&poll->wait->entry);
- init_waitqueue_func_entry(poll->wait, io_poll_wake);
- poll->wait->private = poll;
+ INIT_LIST_HEAD(&poll->wait.entry);
+ init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ poll->wait.private = poll;
INIT_LIST_HEAD(&req->list);
@@ -2520,14 +2526,14 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
spin_lock_irq(&ctx->completion_lock);
if (likely(poll->head)) {
spin_lock(&poll->head->lock);
- if (unlikely(list_empty(&poll->wait->entry))) {
+ if (unlikely(list_empty(&poll->wait.entry))) {
if (ipt.error)
cancel = true;
ipt.error = 0;
mask = 0;
}
if (mask || ipt.error)
- list_del_init(&poll->wait->entry);
+ list_del_init(&poll->wait.entry);
else if (cancel)
WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */
@@ -2582,8 +2588,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(req);
io_put_req(req);
return HRTIMER_NORESTART;
}
@@ -2608,8 +2613,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
if (ret == -1)
return -EALREADY;
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(req);
io_cqring_fill_event(req, -ECANCELED);
io_put_req(req);
return 0;
@@ -2640,8 +2644,8 @@ static int io_timeout_remove(struct io_kiocb *req,
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- if (ret < 0 && req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req(req);
return 0;
}
@@ -2822,8 +2826,8 @@ done:
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
+ if (ret < 0)
+ req_set_fail_links(req);
io_put_req_find_next(req, nxt);
}
@@ -2991,12 +2995,7 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
if (req->result == -EAGAIN)
return -EAGAIN;
- /* workqueue context doesn't hold uring_lock, grab it now */
- if (req->in_async)
- mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req);
- if (req->in_async)
- mutex_unlock(&ctx->uring_lock);
}
return 0;
@@ -3044,8 +3043,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_put_req(req);
if (ret) {
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req(req);
}
@@ -3064,7 +3062,12 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
}
}
-static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+static bool io_req_op_valid(int op)
+{
+ return op >= IORING_OP_NOP && op < IORING_OP_LAST;
+}
+
+static int io_op_needs_file(const struct io_uring_sqe *sqe)
{
int op = READ_ONCE(sqe->opcode);
@@ -3075,9 +3078,11 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
case IORING_OP_TIMEOUT_REMOVE:
case IORING_OP_ASYNC_CANCEL:
case IORING_OP_LINK_TIMEOUT:
- return false;
+ return 0;
default:
- return true;
+ if (io_req_op_valid(op))
+ return 1;
+ return -EINVAL;
}
}
@@ -3094,7 +3099,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
- int fd;
+ int fd, ret;
flags = READ_ONCE(req->sqe->flags);
fd = READ_ONCE(req->sqe->fd);
@@ -3102,8 +3107,9 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- if (!io_op_needs_file(req->sqe))
- return 0;
+ ret = io_op_needs_file(req->sqe);
+ if (ret <= 0)
+ return ret;
if (flags & IOSQE_FIXED_FILE) {
if (unlikely(!ctx->file_table ||
@@ -3179,8 +3185,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
- if (prev->flags & REQ_F_LINK)
- prev->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(prev);
io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
-ETIME);
io_put_req(prev);
@@ -3231,13 +3236,14 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
static void __io_queue_sqe(struct io_kiocb *req)
{
- struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+ struct io_kiocb *linked_timeout;
struct io_kiocb *nxt = NULL;
int ret;
+again:
+ linked_timeout = io_prep_linked_timeout(req);
+
ret = io_issue_sqe(req, &nxt, true);
- if (nxt)
- io_queue_async_work(nxt);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -3256,7 +3262,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
* submit reference when the iocb is actually submitted.
*/
io_queue_async_work(req);
- return;
+ goto done_req;
}
err:
@@ -3273,10 +3279,15 @@ err:
/* and drop final reference, if we failed */
if (ret) {
io_cqring_add_event(req, ret);
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(req);
io_put_req(req);
}
+done_req:
+ if (nxt) {
+ req = nxt;
+ nxt = NULL;
+ goto again;
+ }
}
static void io_queue_sqe(struct io_kiocb *req)
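
The done_req/again change in __io_queue_sqe() above stops punting the next request of a link chain to the async workers and instead loops back and issues it inline, avoiding both the workqueue round-trip and unbounded recursion. A generic sketch of that recursion-to-loop transformation (types and helpers are hypothetical):

struct req;
struct req *issue_one(struct req *r);	/* returns the next linked request, or NULL */

/* Before: one call (or async punt) per linked element. */
static void issue_chain_recursive(struct req *r)
{
	struct req *next = issue_one(r);

	if (next)
		issue_chain_recursive(next);
}

/* After: same traversal with bounded stack usage. */
static void issue_chain_iterative(struct req *r)
{
	for (;;) {
		struct req *next = issue_one(r);

		if (!next)
			break;
		r = next;
	}
}
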
@@ -3293,8 +3304,7 @@ static void io_queue_sqe(struct io_kiocb *req)
if (ret) {
if (ret != -EIOCBQUEUED) {
io_cqring_add_event(req, ret);
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ req_set_fail_links(req);
io_double_put_req(req);
}
} else
@@ -3310,8 +3320,8 @@ static inline void io_queue_link_head(struct io_kiocb *req)
io_queue_sqe(req);
}
-
-#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
+#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+ IOSQE_IO_HARDLINK)
static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
struct io_kiocb **link)
@@ -3349,6 +3359,9 @@ err_req:
if (req->sqe->flags & IOSQE_IO_DRAIN)
(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
+ if (req->sqe->flags & IOSQE_IO_HARDLINK)
+ req->flags |= REQ_F_HARDLINK;
+
io = kmalloc(sizeof(*io), GFP_KERNEL);
if (!io) {
ret = -EAGAIN;
@@ -3358,13 +3371,16 @@ err_req:
ret = io_req_defer_prep(req, io);
if (ret) {
kfree(io);
+ /* fail even hard links since we don't submit */
prev->flags |= REQ_F_FAIL_LINK;
goto err_req;
}
trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->link_list, &prev->link_list);
- } else if (req->sqe->flags & IOSQE_IO_LINK) {
+ } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
req->flags |= REQ_F_LINK;
+ if (req->sqe->flags & IOSQE_IO_HARDLINK)
+ req->flags |= REQ_F_HARDLINK;
INIT_LIST_HEAD(&req->link_list);
*link = req;
@@ -3647,7 +3663,9 @@ static int io_sq_thread(void *data)
}
to_submit = min(to_submit, ctx->sq_entries);
+ mutex_lock(&ctx->uring_lock);
ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+ mutex_unlock(&ctx->uring_lock);
if (ret > 0)
inflight += ret;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 2fd0c8bcb8c1..be601d3a8008 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3325,8 +3325,8 @@ struct dentry *mount_subtree(struct vfsmount *m, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
-int ksys_mount(const char __user *dev_name, const char __user *dir_name,
- const char __user *type, unsigned long flags, void __user *data)
+SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+ char __user *, type, unsigned long, flags, void __user *, data)
{
int ret;
char *kernel_type;
@@ -3359,12 +3359,6 @@ out_type:
return ret;
}
-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
- char __user *, type, unsigned long, flags, void __user *, data)
-{
- return ksys_mount(dev_name, dir_name, type, flags, data);
-}
-
/*
* Create a kernel mount representation for a new, prepared superblock
* (specified by fs_fd) and attach to an open_tree-like file descriptor.
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index b801c6353100..6220642fe113 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -227,13 +227,17 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
{
struct ovl_fh *fh;
- int fh_type, fh_len, dwords;
- void *buf;
+ int fh_type, dwords;
int buflen = MAX_HANDLE_SZ;
uuid_t *uuid = &real->d_sb->s_uuid;
+ int err;
- buf = kmalloc(buflen, GFP_KERNEL);
- if (!buf)
+ /* Make sure the real fid stays 32bit aligned */
+ BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
+ BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);
+
+ fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
+ if (!fh)
return ERR_PTR(-ENOMEM);
/*
@@ -242,27 +246,19 @@ struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
 	 * the price of reconnecting the dentry.
*/
dwords = buflen >> 2;
- fh_type = exportfs_encode_fh(real, buf, &dwords, 0);
+ fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
buflen = (dwords << 2);
- fh = ERR_PTR(-EIO);
+ err = -EIO;
if (WARN_ON(fh_type < 0) ||
WARN_ON(buflen > MAX_HANDLE_SZ) ||
WARN_ON(fh_type == FILEID_INVALID))
- goto out;
+ goto out_err;
- BUILD_BUG_ON(MAX_HANDLE_SZ + offsetof(struct ovl_fh, fid) > 255);
- fh_len = offsetof(struct ovl_fh, fid) + buflen;
- fh = kmalloc(fh_len, GFP_KERNEL);
- if (!fh) {
- fh = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- fh->version = OVL_FH_VERSION;
- fh->magic = OVL_FH_MAGIC;
- fh->type = fh_type;
- fh->flags = OVL_FH_FLAG_CPU_ENDIAN;
+ fh->fb.version = OVL_FH_VERSION;
+ fh->fb.magic = OVL_FH_MAGIC;
+ fh->fb.type = fh_type;
+ fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
/*
* When we will want to decode an overlay dentry from this handle
* and all layers are on the same fs, if we get a disconncted real
@@ -270,14 +266,15 @@ struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
* it to upperdentry or to lowerstack is by checking this flag.
*/
if (is_upper)
- fh->flags |= OVL_FH_FLAG_PATH_UPPER;
- fh->len = fh_len;
- fh->uuid = *uuid;
- memcpy(fh->fid, buf, buflen);
+ fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
+ fh->fb.len = sizeof(fh->fb) + buflen;
+ fh->fb.uuid = *uuid;
-out:
- kfree(buf);
return fh;
+
+out_err:
+ kfree(fh);
+ return ERR_PTR(err);
}
int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
@@ -300,8 +297,8 @@ int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
/*
* Do not fail when upper doesn't support xattrs.
*/
- err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
- fh ? fh->len : 0, 0);
+ err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh->buf,
+ fh ? fh->fb.len : 0, 0);
kfree(fh);
return err;
@@ -317,7 +314,7 @@ static int ovl_set_upper_fh(struct dentry *upper, struct dentry *index)
if (IS_ERR(fh))
return PTR_ERR(fh);
- err = ovl_do_setxattr(index, OVL_XATTR_UPPER, fh, fh->len, 0);
+ err = ovl_do_setxattr(index, OVL_XATTR_UPPER, fh->buf, fh->fb.len, 0);
kfree(fh);
return err;
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 702aa63f6774..29abdb1d3b5c 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -1170,7 +1170,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
if (newdentry == trap)
goto out_dput;
- if (WARN_ON(olddentry->d_inode == newdentry->d_inode))
+ if (olddentry->d_inode == newdentry->d_inode)
goto out_dput;
err = 0;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 73c9775215b3..70e55588aedc 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -211,10 +211,11 @@ static int ovl_check_encode_origin(struct dentry *dentry)
return 1;
}
-static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
+static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen)
{
struct ovl_fh *fh = NULL;
int err, enc_lower;
+ int len;
/*
* Check if we should encode a lower or upper file handle and maybe
@@ -231,11 +232,12 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
return PTR_ERR(fh);
err = -EOVERFLOW;
- if (fh->len > buflen)
+ len = OVL_FH_LEN(fh);
+ if (len > buflen)
goto fail;
- memcpy(buf, (char *)fh, fh->len);
- err = fh->len;
+ memcpy(fid, fh, len);
+ err = len;
out:
kfree(fh);
@@ -243,31 +245,16 @@ out:
fail:
pr_warn_ratelimited("overlayfs: failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
- dentry, err, buflen, fh ? (int)fh->len : 0,
- fh ? fh->type : 0);
+ dentry, err, buflen, fh ? (int)fh->fb.len : 0,
+ fh ? fh->fb.type : 0);
goto out;
}
-static int ovl_dentry_to_fh(struct dentry *dentry, u32 *fid, int *max_len)
-{
- int res, len = *max_len << 2;
-
- res = ovl_d_to_fh(dentry, (char *)fid, len);
- if (res <= 0)
- return FILEID_INVALID;
-
- len = res;
-
- /* Round up to dwords */
- *max_len = (len + 3) >> 2;
- return OVL_FILEID;
-}
-
static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent)
{
struct dentry *dentry;
- int type;
+ int bytes = *max_len << 2;
/* TODO: encode connectable file handles */
if (parent)
@@ -277,10 +264,14 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
if (WARN_ON(!dentry))
return FILEID_INVALID;
- type = ovl_dentry_to_fh(dentry, fid, max_len);
-
+ bytes = ovl_dentry_to_fid(dentry, fid, bytes);
dput(dentry);
- return type;
+ if (bytes <= 0)
+ return FILEID_INVALID;
+
+ *max_len = bytes >> 2;
+
+ return OVL_FILEID_V1;
}
/*
@@ -777,24 +768,45 @@ out_err:
goto out;
}
+static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type)
+{
+ struct ovl_fh *fh;
+
+ /* If on-wire inner fid is aligned - nothing to do */
+ if (fh_type == OVL_FILEID_V1)
+ return (struct ovl_fh *)fid;
+
+ if (fh_type != OVL_FILEID_V0)
+ return ERR_PTR(-EINVAL);
+
+ fh = kzalloc(buflen, GFP_KERNEL);
+ if (!fh)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy unaligned inner fh into aligned buffer */
+ memcpy(&fh->fb, fid, buflen - OVL_FH_WIRE_OFFSET);
+ return fh;
+}
+
static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
struct dentry *dentry = NULL;
- struct ovl_fh *fh = (struct ovl_fh *) fid;
+ struct ovl_fh *fh = NULL;
int len = fh_len << 2;
unsigned int flags = 0;
int err;
- err = -EINVAL;
- if (fh_type != OVL_FILEID)
+ fh = ovl_fid_to_fh(fid, len, fh_type);
+ err = PTR_ERR(fh);
+ if (IS_ERR(fh))
goto out_err;
err = ovl_check_fh_len(fh, len);
if (err)
goto out_err;
- flags = fh->flags;
+ flags = fh->fb.flags;
dentry = (flags & OVL_FH_FLAG_PATH_UPPER) ?
ovl_upper_fh_to_d(sb, fh) :
ovl_lower_fh_to_d(sb, fh);
@@ -802,12 +814,18 @@ static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
if (IS_ERR(dentry) && err != -ESTALE)
goto out_err;
+out:
+ /* We may have needed to re-align OVL_FILEID_V0 */
+ if (!IS_ERR_OR_NULL(fh) && fh != (void *)fid)
+ kfree(fh);
+
return dentry;
out_err:
pr_warn_ratelimited("overlayfs: failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
- len, fh_type, flags, err);
- return ERR_PTR(err);
+ fh_len, fh_type, flags, err);
+ dentry = ERR_PTR(err);
+ goto out;
}
static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index bc14781886bf..b045cf1826fc 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -200,8 +200,14 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
(!ovl_verify_lower(dentry->d_sb) &&
(is_dir || lowerstat.nlink == 1))) {
- stat->ino = lowerstat.ino;
lower_layer = ovl_layer_lower(dentry);
+ /*
+ * Cannot use origin st_dev;st_ino because
+ * origin inode content may differ from overlay
+ * inode content.
+ */
+ if (samefs || lower_layer->fsid)
+ stat->ino = lowerstat.ino;
}
/*
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index c269d6033525..76ff66339173 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -84,21 +84,21 @@ static int ovl_acceptable(void *ctx, struct dentry *dentry)
* Return -ENODATA for "origin unknown".
* Return <0 for an invalid file handle.
*/
-int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
+int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
{
- if (fh_len < sizeof(struct ovl_fh) || fh_len < fh->len)
+ if (fb_len < sizeof(struct ovl_fb) || fb_len < fb->len)
return -EINVAL;
- if (fh->magic != OVL_FH_MAGIC)
+ if (fb->magic != OVL_FH_MAGIC)
return -EINVAL;
/* Treat larger version and unknown flags as "origin unknown" */
- if (fh->version > OVL_FH_VERSION || fh->flags & ~OVL_FH_FLAG_ALL)
+ if (fb->version > OVL_FH_VERSION || fb->flags & ~OVL_FH_FLAG_ALL)
return -ENODATA;
/* Treat endianness mismatch as "origin unknown" */
- if (!(fh->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
- (fh->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
+ if (!(fb->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
+ (fb->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
return -ENODATA;
return 0;
@@ -119,15 +119,15 @@ static struct ovl_fh *ovl_get_fh(struct dentry *dentry, const char *name)
if (res == 0)
return NULL;
- fh = kzalloc(res, GFP_KERNEL);
+ fh = kzalloc(res + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
if (!fh)
return ERR_PTR(-ENOMEM);
- res = vfs_getxattr(dentry, name, fh, res);
+ res = vfs_getxattr(dentry, name, fh->buf, res);
if (res < 0)
goto fail;
- err = ovl_check_fh_len(fh, res);
+ err = ovl_check_fb_len(&fh->fb, res);
if (err < 0) {
if (err == -ENODATA)
goto out;
@@ -158,12 +158,12 @@ struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
* Make sure that the stored uuid matches the uuid of the lower
* layer where file handle will be decoded.
*/
- if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
+ if (!uuid_equal(&fh->fb.uuid, &mnt->mnt_sb->s_uuid))
return NULL;
- bytes = (fh->len - offsetof(struct ovl_fh, fid));
- real = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
- bytes >> 2, (int)fh->type,
+ bytes = (fh->fb.len - offsetof(struct ovl_fb, fid));
+ real = exportfs_decode_fh(mnt, (struct fid *)fh->fb.fid,
+ bytes >> 2, (int)fh->fb.type,
connected ? ovl_acceptable : NULL, mnt);
if (IS_ERR(real)) {
/*
@@ -173,7 +173,7 @@ struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
* index entries correctly.
*/
if (real == ERR_PTR(-ESTALE) &&
- !(fh->flags & OVL_FH_FLAG_PATH_UPPER))
+ !(fh->fb.flags & OVL_FH_FLAG_PATH_UPPER))
real = NULL;
return real;
}
@@ -323,6 +323,14 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
int i;
for (i = 0; i < ofs->numlower; i++) {
+ /*
+ * If lower fs uuid is not unique among lower fs we cannot match
+ * fh->uuid to layer.
+ */
+ if (ofs->lower_layers[i].fsid &&
+ ofs->lower_layers[i].fs->bad_uuid)
+ continue;
+
origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
connected);
if (origin)
@@ -400,7 +408,7 @@ static int ovl_verify_fh(struct dentry *dentry, const char *name,
if (IS_ERR(ofh))
return PTR_ERR(ofh);
- if (fh->len != ofh->len || memcmp(fh, ofh, fh->len))
+ if (fh->fb.len != ofh->fb.len || memcmp(&fh->fb, &ofh->fb, fh->fb.len))
err = -ESTALE;
kfree(ofh);
@@ -431,7 +439,7 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
err = ovl_verify_fh(dentry, name, fh);
if (set && err == -ENODATA)
- err = ovl_do_setxattr(dentry, name, fh, fh->len, 0);
+ err = ovl_do_setxattr(dentry, name, fh->buf, fh->fb.len, 0);
if (err)
goto fail;
@@ -505,20 +513,20 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
goto fail;
err = -EINVAL;
- if (index->d_name.len < sizeof(struct ovl_fh)*2)
+ if (index->d_name.len < sizeof(struct ovl_fb)*2)
goto fail;
err = -ENOMEM;
len = index->d_name.len / 2;
- fh = kzalloc(len, GFP_KERNEL);
+ fh = kzalloc(len + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
if (!fh)
goto fail;
err = -EINVAL;
- if (hex2bin((u8 *)fh, index->d_name.name, len))
+ if (hex2bin(fh->buf, index->d_name.name, len))
goto fail;
- err = ovl_check_fh_len(fh, len);
+ err = ovl_check_fb_len(&fh->fb, len);
if (err)
goto fail;
@@ -597,11 +605,11 @@ static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name)
{
char *n, *s;
- n = kcalloc(fh->len, 2, GFP_KERNEL);
+ n = kcalloc(fh->fb.len, 2, GFP_KERNEL);
if (!n)
return -ENOMEM;
- s = bin2hex(n, fh, fh->len);
+ s = bin2hex(n, fh->buf, fh->fb.len);
*name = (struct qstr) QSTR_INIT(n, s - n);
return 0;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 6934bcf030f0..f283b1d69a9e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -71,20 +71,36 @@ enum ovl_entry_flag {
#error Endianness not defined
#endif
-/* The type returned by overlay exportfs ops when encoding an ovl_fh handle */
-#define OVL_FILEID 0xfb
+/* The type used to be returned by overlay exportfs for misaligned fid */
+#define OVL_FILEID_V0 0xfb
+/* The type returned by overlay exportfs for 32bit aligned fid */
+#define OVL_FILEID_V1 0xf8
-/* On-disk and in-memeory format for redirect by file handle */
-struct ovl_fh {
+/* On-disk format for "origin" file handle */
+struct ovl_fb {
u8 version; /* 0 */
u8 magic; /* 0xfb */
u8 len; /* size of this header + size of fid */
u8 flags; /* OVL_FH_FLAG_* */
u8 type; /* fid_type of fid */
uuid_t uuid; /* uuid of filesystem */
- u8 fid[0]; /* file identifier */
+ u32 fid[0]; /* file identifier should be 32bit aligned in-memory */
} __packed;
+/* In-memory and on-wire format for overlay file handle */
+struct ovl_fh {
+ u8 padding[3]; /* make sure fb.fid is 32bit aligned */
+ union {
+ struct ovl_fb fb;
+ u8 buf[0];
+ };
+} __packed;
+
+#define OVL_FH_WIRE_OFFSET offsetof(struct ovl_fh, fb)
+#define OVL_FH_LEN(fh) (OVL_FH_WIRE_OFFSET + (fh)->fb.len)
+#define OVL_FH_FID_OFFSET (OVL_FH_WIRE_OFFSET + \
+ offsetof(struct ovl_fb, fid))
+
static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
int err = vfs_rmdir(dir, dentry);
@@ -302,7 +318,13 @@ static inline void ovl_inode_unlock(struct inode *inode)
/* namei.c */
-int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
+int ovl_check_fb_len(struct ovl_fb *fb, int fb_len);
+
+static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
+{
+ return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET);
+}
+
struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
bool connected);
int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
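
The new ovl_fb/ovl_fh split above keeps the file handle payload 32-bit aligned in memory even though the on-disk header is __packed: ovl_fb carries five one-byte fields plus a 16-byte uuid_t before fid (21 bytes), and the in-memory wrapper prepends u8 padding[3], putting fid at offset 24. A hedged compile-time check of that layout, using only the macros introduced here (the helper name is illustrative, not in the patch):

static inline void ovl_fh_layout_check(void)	/* sketch only */
{
	BUILD_BUG_ON(OVL_FH_WIRE_OFFSET != 3);	/* just the padding bytes */
	BUILD_BUG_ON(OVL_FH_FID_OFFSET != 24);	/* 3 + 21-byte ovl_fb header */
	BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);	/* mirrors the check in copy_up.c */
}
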
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index a8279280e88d..28348c44ea5b 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -22,6 +22,8 @@ struct ovl_config {
struct ovl_sb {
struct super_block *sb;
dev_t pseudo_dev;
+ /* Unusable (conflicting) uuid */
+ bool bad_uuid;
};
struct ovl_layer {
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index afbcb116a7f1..7621ff176d15 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1255,7 +1255,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
{
unsigned int i;
- if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt))
+ if (!ofs->config.nfs_export && !ofs->upper_mnt)
return true;
for (i = 0; i < ofs->numlowerfs; i++) {
@@ -1263,9 +1263,13 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
* We use uuid to associate an overlay lower file handle with a
* lower layer, so we can accept lower fs with null uuid as long
* as all lower layers with null uuid are on the same fs.
+		 * If we detect multiple lower fs with the same uuid, we
+ * disable lower file handle decoding on all of them.
*/
- if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid))
+ if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
+ ofs->lower_fs[i].bad_uuid = true;
return false;
+ }
}
return true;
}
@@ -1277,6 +1281,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
unsigned int i;
dev_t dev;
int err;
+ bool bad_uuid = false;
/* fsid 0 is reserved for upper fs even with non upper overlay */
if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
@@ -1288,11 +1293,15 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
}
if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
- ofs->config.index = false;
- ofs->config.nfs_export = false;
- pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
- uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
- path->dentry);
+ bad_uuid = true;
+ if (ofs->config.index || ofs->config.nfs_export) {
+ ofs->config.index = false;
+ ofs->config.nfs_export = false;
+ pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+ uuid_is_null(&sb->s_uuid) ? "null" :
+ "conflicting",
+ path->dentry);
+ }
}
err = get_anon_bdev(&dev);
@@ -1303,6 +1312,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
ofs->lower_fs[ofs->numlowerfs].sb = sb;
ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
+ ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
ofs->numlowerfs++;
return ofs->numlowerfs;
diff --git a/fs/pipe.c b/fs/pipe.c
index 87109e761fa5..04d004ee2e8c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -364,17 +364,39 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = -EAGAIN;
break;
}
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
__pipe_unlock(pipe);
- if (was_full) {
+
+ /*
+ * We only get here if we didn't actually read anything.
+ *
+ * However, we could have seen (and removed) a zero-sized
+ * pipe buffer, and might have made space in the buffers
+ * that way.
+ *
+ * You can't make zero-sized pipe buffers by doing an empty
+ * write (not even in packet mode), but they can happen if
+ * the writer gets an EFAULT when trying to fill a buffer
+ * that already got allocated and inserted in the buffer
+ * array.
+ *
+ * So we still need to wake up any pending writers in the
+ * _very_ unlikely case that the pipe was full, but we got
+ * no data.
+ */
+ if (unlikely(was_full)) {
wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
- wait_event_interruptible(pipe->wait, pipe_readable(pipe));
+
+ /*
+ * But because we didn't read anything, at this point we can
+ * just return directly with -ERESTARTSYS if we're interrupted,
+ * since we've done any required wakeups and there's no need
+ * to mark anything accessed. And we've dropped the lock.
+ */
+ if (wait_event_interruptible(pipe->wait, pipe_readable(pipe)) < 0)
+ return -ERESTARTSYS;
+
__pipe_lock(pipe);
was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
}
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index eabc6ac19906..b79e3fd19d11 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -315,7 +315,7 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
if (arg.block_size != PAGE_SIZE)
return -EINVAL;
- if (arg.salt_size > FIELD_SIZEOF(struct fsverity_descriptor, salt))
+ if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
return -EMSGSIZE;
if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index e785c6eb3561..4b1a7724f20d 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -72,6 +72,8 @@
#define MMP2_CLK_CCIC1_PHY 118
#define MMP2_CLK_CCIC1_SPHY 119
#define MMP2_CLK_DISP0_LCDC 120
+#define MMP2_CLK_USBHSIC0 121
+#define MMP2_CLK_USBHSIC1 122
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 19394c77ed99..e4a6949fd171 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -188,7 +188,6 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
@@ -720,7 +719,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2bae9ed3c783..fb376b5b7281 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#define DEVFREQ_NAME_LEN 16
@@ -123,8 +124,8 @@ struct devfreq_dev_profile {
* @previous_freq: previously configured frequency value.
* @data: Private data of the governor. The devfreq framework does not
* touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
+ * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
@@ -136,6 +137,8 @@ struct devfreq_dev_profile {
* @time_in_state: Statistics of devfreq states
* @last_stat_updated: The last time stat updated
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
+ * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
 * This structure stores the devfreq information for a given device.
*
@@ -161,8 +164,8 @@ struct devfreq {
void *data; /* private data for governors */
- unsigned long min_freq;
- unsigned long max_freq;
+ struct dev_pm_qos_request user_min_freq_req;
+ struct dev_pm_qos_request user_max_freq_req;
unsigned long scaling_min_freq;
unsigned long scaling_max_freq;
bool stop_polling;
@@ -178,6 +181,9 @@ struct devfreq {
unsigned long last_stat_updated;
struct srcu_notifier_head transition_notifier_list;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct devfreq_freqs {
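
With min_freq/max_freq replaced by dev_pm_qos_request objects, the sysfs limits become ordinary PM QoS frequency constraints, aggregated with any other requester. A hedged sketch of how a caller could express similar limits through the PM QoS API (the device pointer and kHz values are assumptions for illustration, not part of this patch):

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request ex_min_req, ex_max_req;

/* Illustrative: constrain a device to roughly 200-800 MHz (values in kHz). */
static int ex_apply_freq_limits(struct device *dev)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, &ex_min_req,
				     DEV_PM_QOS_MIN_FREQUENCY, 200000);
	if (ret < 0)
		return ret;

	ret = dev_pm_qos_add_request(dev, &ex_max_req,
				     DEV_PM_QOS_MAX_FREQUENCY, 800000);
	if (ret < 0)
		dev_pm_qos_remove_request(&ex_min_req);

	return ret < 0 ? ret : 0;
}
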
diff --git a/include/linux/device.h b/include/linux/device.h
index e226030c1df3..96ff76731e93 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1666,11 +1666,11 @@ extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mntdir);
+extern int devtmpfs_mount(void);
#else
static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
-static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a141cb07e76a..345f3748e0fb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -420,7 +420,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define BPF_FIELD_SIZEOF(type, field) \
({ \
- const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
BUILD_BUG_ON(__size < 0); \
__size; \
})
@@ -497,7 +497,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
- BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
+ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
@@ -608,7 +608,7 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
- BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
cb->data_meta = skb->data - skb_metadata_len(skb);
cb->data_end = skb->data + skb_headlen(skb);
}
@@ -646,9 +646,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
* attached to sockets, we need to clear the bpf_skb_cb() area
* to not leak previous contents to user space.
*/
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- FIELD_SIZEOF(struct qdisc_skb_cb, data));
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+ sizeof_field(struct qdisc_skb_cb, data));
return qdisc_skb_cb(skb)->data;
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7247d35c3d16..db95244a62d4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -264,6 +264,7 @@ int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
struct dyn_ftrace *rec,
unsigned long old_addr,
unsigned long new_addr);
+unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
@@ -290,6 +291,10 @@ static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
{
return -ENODEV;
}
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+ return 0;
+}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d2f786706657..582ef05ec07e 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -300,6 +300,7 @@ struct i2c_driver {
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @dev: Driver model device node for the slave.
+ * @init_irq: IRQ that was set at initialization
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
@@ -466,12 +467,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
/* Common custom probe functions */
extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
-/* For devices that use several addresses, use i2c_new_dummy() to make
- * client handles for the extra addresses.
- */
-extern struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adap, u16 address);
-
extern struct i2c_client *
i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
@@ -856,6 +851,11 @@ extern void i2c_del_driver(struct i2c_driver *driver);
#define i2c_add_driver(driver) \
i2c_register_driver(THIS_MODULE, driver)
+static inline bool i2c_client_has_driver(struct i2c_client *client)
+{
+ return !IS_ERR_OR_NULL(client) && client->dev.driver;
+}
+
/* call the i2c_client->command() of all attached clients with
* the given arguments */
extern void i2c_clients_command(struct i2c_adapter *adap,
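
i2c_client_has_driver() gives callers a single helper in place of the open-coded IS_ERR_OR_NULL plus dev.driver test. A hedged usage sketch in a hypothetical caller:

/* Illustrative: only touch the client if a driver is actually bound. */
if (i2c_client_has_driver(client))
	dev_dbg(&client->dev, "bound to driver %s\n",
		client->dev.driver->name);
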
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index d77fe34fb00a..aa5914355728 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -28,3 +28,5 @@ extern unsigned int real_root_dev;
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
+
+void console_on_rootfs(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7ed1e2f8641e..538c25e778c0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -149,7 +149,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
- BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
+ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 10f81629b9ce..6d0d70f3219c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
+ * @module: The LLDD module using the interface
+ *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
+ struct module *module;
+
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index 3d507a8a6989..5c4d7a755101 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -14,7 +14,7 @@ struct phy_device;
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
- FIELD_SIZEOF(struct mdio_device, addr)+\
+ sizeof_field(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
struct phy_led_trigger {
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c09d67edda3a..1e6108b8d15f 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -302,9 +302,8 @@ extern int kptr_restrict;
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
+#define pr_warn(fmt, ...) \
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn pr_warning
#define pr_notice(fmt, ...) \
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
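
With pr_warning() removed, pr_warn() is the only spelling; the ftrace and ring_buffer hunks further down are exactly that rename. A minimal usage sketch, assuming the usual per-file pr_fmt() prefix (the prefix string is illustrative):

#define pr_fmt(fmt) "exdrv: " fmt	/* must be defined before the include */
#include <linux/printk.h>

static void ex_report_lockdown(void)
{
	/* emitted at KERN_WARNING as "exdrv: lockdown is enabled, ..." */
	pr_warn("lockdown is enabled, skipping self-tests\n");
}
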
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d0391cc2dae9..2960dedcfde8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1231,10 +1231,7 @@ asmlinkage long sys_ni_syscall(void);
* the ksys_xyzyyz() functions prototyped below.
*/
-int ksys_mount(const char __user *dev_name, const char __user *dir_name,
- const char __user *type, unsigned long flags, void __user *data);
int ksys_umount(char __user *name, int flags);
-int ksys_dup(unsigned int fildes);
int ksys_chroot(const char __user *filename);
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
int ksys_chdir(const char __user *filename);
diff --git a/include/net/garp.h b/include/net/garp.h
index c41833bd4590..4d9a0c6a2e5f 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -37,7 +37,7 @@ struct garp_skb_cb {
static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct garp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
return (struct garp_skb_cb *)skb->cb;
}
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index af645604f328..236503a50759 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -33,8 +33,8 @@
/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
- (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
- FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+ (sizeof_field(struct ip_tunnel_key, u) - \
+ sizeof_field(struct ip_tunnel_key, u.ipv4))
struct ip_tunnel_key {
__be64 tun_id;
@@ -63,7 +63,7 @@ struct ip_tunnel_key {
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX \
- GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \
+ GENMASK((sizeof_field(struct ip_tunnel_info, \
options_len) * BITS_PER_BYTE) - 1, 0)
struct ip_tunnel_info {
diff --git a/include/net/mrp.h b/include/net/mrp.h
index ef58b4a07190..1c308c034e1a 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -39,7 +39,7 @@ struct mrp_skb_cb {
static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
return (struct mrp_skb_cb *)skb->cb;
}
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 44b5a00a9c64..37f0fbefb060 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -81,7 +81,7 @@ struct nf_conn_help {
};
#define NF_CT_HELPER_BUILD_BUG_ON(structsize) \
- BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data))
+ BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conn_help, data))
struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
u16 l3num, u8 protonum);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 7281895fa6d9..2656155b4069 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -41,7 +41,7 @@ struct nft_immediate_expr {
*/
static inline u32 nft_cmp_fast_mask(unsigned int len)
{
- return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
+ return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
data) * BITS_PER_BYTE - len));
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 87d54ef57f00..80f996406bba 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2305,7 +2305,7 @@ struct sock_skb_cb {
* using skb->cb[] would keep using it directly and utilize its
 * alignment guarantee.
*/
-#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
sizeof(struct sock_skb_cb)))
#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index cacb48faf670..5608e14e3aad 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2832,6 +2832,11 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
struct rdma_user_mmap_entry *entry,
size_t length);
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 min_pgoff,
+ u32 max_pgoff);
+
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
unsigned long pgoff);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index eabccb46edd1..a3300e1b9a01 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -48,6 +48,7 @@ struct io_uring_sqe {
#define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */
#define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */
#define IOSQE_IO_LINK (1U << 2) /* links next sqe */
+#define IOSQE_IO_HARDLINK (1U << 3) /* like LINK, but stronger */
/*
* io_uring_setup() flags
@@ -57,23 +58,28 @@ struct io_uring_sqe {
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
-#define IORING_OP_NOP 0
-#define IORING_OP_READV 1
-#define IORING_OP_WRITEV 2
-#define IORING_OP_FSYNC 3
-#define IORING_OP_READ_FIXED 4
-#define IORING_OP_WRITE_FIXED 5
-#define IORING_OP_POLL_ADD 6
-#define IORING_OP_POLL_REMOVE 7
-#define IORING_OP_SYNC_FILE_RANGE 8
-#define IORING_OP_SENDMSG 9
-#define IORING_OP_RECVMSG 10
-#define IORING_OP_TIMEOUT 11
-#define IORING_OP_TIMEOUT_REMOVE 12
-#define IORING_OP_ACCEPT 13
-#define IORING_OP_ASYNC_CANCEL 14
-#define IORING_OP_LINK_TIMEOUT 15
-#define IORING_OP_CONNECT 16
+enum {
+ IORING_OP_NOP,
+ IORING_OP_READV,
+ IORING_OP_WRITEV,
+ IORING_OP_FSYNC,
+ IORING_OP_READ_FIXED,
+ IORING_OP_WRITE_FIXED,
+ IORING_OP_POLL_ADD,
+ IORING_OP_POLL_REMOVE,
+ IORING_OP_SYNC_FILE_RANGE,
+ IORING_OP_SENDMSG,
+ IORING_OP_RECVMSG,
+ IORING_OP_TIMEOUT,
+ IORING_OP_TIMEOUT_REMOVE,
+ IORING_OP_ACCEPT,
+ IORING_OP_ASYNC_CANCEL,
+ IORING_OP_LINK_TIMEOUT,
+ IORING_OP_CONNECT,
+
+ /* this goes last, obviously */
+ IORING_OP_LAST,
+};
/*
* sqe->fsync_flags
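
IOSQE_IO_HARDLINK behaves like IOSQE_IO_LINK except that an error or short completion does not sever the chain (REQ_F_HARDLINK in fs/io_uring.c above suppresses REQ_F_FAIL_LINK). A hedged userspace sketch using liburing, assuming a liburing build whose headers already carry the new flag; the descriptor and iovec are placeholders:

#include <liburing.h>

/* Illustrative: write-then-fsync chain that still runs the fsync on error. */
static int ex_queue_hard_linked_pair(struct io_uring *ring, int fd,
				     const struct iovec *iov)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	io_uring_prep_writev(sqe, fd, iov, 1, 0);
	sqe->flags |= IOSQE_IO_HARDLINK;	/* next sqe runs even if this fails */

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	io_uring_prep_fsync(sqe, fd, 0);

	return io_uring_submit(ring);
}
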
diff --git a/init/do_mounts.c b/init/do_mounts.c
index af9cda887a23..f55cbd9cb818 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -387,12 +387,25 @@ static void __init get_fs_names(char *page)
*s = '\0';
}
-static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+static int __init do_mount_root(const char *name, const char *fs,
+ const int flags, const void *data)
{
struct super_block *s;
- int err = ksys_mount(name, "/root", fs, flags, data);
- if (err)
- return err;
+ char *data_page;
+ struct page *p;
+ int ret;
+
+ /* do_mount() requires a full page as fifth argument */
+ p = alloc_page(GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ data_page = page_address(p);
+ strncpy(data_page, data, PAGE_SIZE - 1);
+
+ ret = do_mount(name, "/root", fs, flags, data_page);
+ if (ret)
+ goto out;
ksys_chdir("/root");
s = current->fs->pwd.dentry->d_sb;
@@ -402,7 +415,10 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
s->s_type->name,
sb_rdonly(s) ? " readonly" : "",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
- return 0;
+
+out:
+ put_page(p);
+ return ret;
}
void __init mount_block_root(char *name, int flags)
@@ -670,8 +686,8 @@ void __init prepare_namespace(void)
mount_root();
out:
- devtmpfs_mount("dev");
- ksys_mount(".", "/", NULL, MS_MOVE, NULL);
+ devtmpfs_mount();
+ do_mount(".", "/", NULL, MS_MOVE, NULL);
ksys_chroot(".");
}
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index a9c6cc56f505..dab8b1151b56 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -48,13 +48,10 @@ early_param("initrd", early_initrd);
static int init_linuxrc(struct subprocess_info *info, struct cred *new)
{
ksys_unshare(CLONE_FS | CLONE_FILES);
- /* stdin/stdout/stderr for /linuxrc */
- ksys_open("/dev/console", O_RDWR, 0);
- ksys_dup(0);
- ksys_dup(0);
+ console_on_rootfs();
/* move initrd over / and chdir/chroot in initrd root */
ksys_chdir("/root");
- ksys_mount(".", "/", NULL, MS_MOVE, NULL);
+ do_mount(".", "/", NULL, MS_MOVE, NULL);
ksys_chroot(".");
ksys_setsid();
return 0;
@@ -89,7 +86,7 @@ static void __init handle_initrd(void)
current->flags &= ~PF_FREEZER_SKIP;
/* move initrd to rootfs' /old */
- ksys_mount("..", ".", NULL, MS_MOVE, NULL);
+ do_mount("..", ".", NULL, MS_MOVE, NULL);
/* switch root and cwd back to / of rootfs */
ksys_chroot("..");
@@ -103,7 +100,7 @@ static void __init handle_initrd(void)
mount_root();
printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
- error = ksys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
+ error = do_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
if (!error)
printk("okay\n");
else {
diff --git a/init/main.c b/init/main.c
index 91f6ebb30ef0..ec3a1463ac69 100644
--- a/init/main.c
+++ b/init/main.c
@@ -93,6 +93,7 @@
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
+#include <linux/file.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -1155,6 +1156,30 @@ static int __ref kernel_init(void *unused)
"See Linux Documentation/admin-guide/init.rst for guidance.");
}
+void console_on_rootfs(void)
+{
+ struct file *file;
+ unsigned int i;
+
+ /* Open /dev/console in kernelspace, this should never fail */
+ file = filp_open("/dev/console", O_RDWR, 0);
+ if (!file)
+ goto err_out;
+
+ /* create stdin/stdout/stderr, this should never fail */
+ for (i = 0; i < 3; i++) {
+ if (f_dupfd(i, file, 0) != i)
+ goto err_out;
+ }
+
+ return;
+
+err_out:
+ /* no panic -- this might not be fatal */
+ pr_err("Warning: unable to open an initial console.\n");
+ return;
+}
+
static noinline void __init kernel_init_freeable(void)
{
/*
@@ -1190,12 +1215,8 @@ static noinline void __init kernel_init_freeable(void)
do_basic_setup();
- /* Open the /dev/console on the rootfs, this should never fail */
- if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
- pr_err("Warning: unable to open an initial console.\n");
+ console_on_rootfs();
- (void) ksys_dup(0);
- (void) ksys_dup(0);
/*
* check if there is an early userspace init. If yes, let it do all
* the work
diff --git a/ipc/util.c b/ipc/util.c
index d126d156efc6..915eacb9c059 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -100,7 +100,7 @@ device_initcall(ipc_init);
static const struct rhashtable_params ipc_kht_params = {
.head_offset = offsetof(struct kern_ipc_perm, khtnode),
.key_offset = offsetof(struct kern_ipc_perm, key),
- .key_len = FIELD_SIZEOF(struct kern_ipc_perm, key),
+ .key_len = sizeof_field(struct kern_ipc_perm, key),
.automatic_shrinking = true,
};
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9f90d3c92bda..4fb20ab179fe 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1341,7 +1341,7 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(
BPF_SIZE(si->code), si->dst_reg, si->src_reg,
bpf_target_off(struct bpf_sysctl_kern, write,
- FIELD_SIZEOF(struct bpf_sysctl_kern,
+ sizeof_field(struct bpf_sysctl_kern,
write),
target_size));
break;
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 2ba750725cb2..6bd22f6d9f41 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -357,7 +357,7 @@ static int cgroup_storage_check_btf(const struct bpf_map *map,
* The first field must be a 64 bit integer at 0 offset.
*/
m = (struct btf_member *)(key_type + 1);
- size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
+ size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
return -EINVAL;
@@ -366,7 +366,7 @@ static int cgroup_storage_check_btf(const struct bpf_map *map,
*/
m++;
offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
- size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
+ size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
return -EINVAL;
diff --git a/kernel/module.c b/kernel/module.c
index 3a486f826224..b56f3224b161 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3730,6 +3730,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_enable_ro(mod, false);
module_enable_nx(mod);
+ module_enable_x(mod);
/* Mark state as coming so strong_try_module_get() ignores us,
* but kallsyms etc. can see us. */
@@ -3752,11 +3753,6 @@ static int prepare_coming_module(struct module *mod)
if (err)
return err;
- /* Make module executable after ftrace is enabled */
- mutex_lock(&module_mutex);
- module_enable_x(mod);
- mutex_unlock(&module_mutex);
-
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_COMING, mod);
return 0;
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 67e0c462b059..a2659735db73 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -101,6 +101,15 @@ int function_graph_enter(unsigned long ret, unsigned long func,
{
struct ftrace_graph_ent trace;
+ /*
+	 * Skip graph tracing if the return location is served by a direct trampoline,
+	 * since the call sequence and return addresses are then unpredictable.
+ * Ex: BPF trampoline may call original function and may skip frame
+ * depending on type of BPF programs attached.
+ */
+ if (ftrace_direct_func_count &&
+ ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
+ return -EBUSY;
trace.func = func;
trace.depth = ++current->curr_ret_depth;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 74439ab5c2b6..ac99a3500076 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2364,7 +2364,7 @@ int ftrace_direct_func_count;
* Search the direct_functions hash to see if the given instruction pointer
* has a direct caller attached to it.
*/
-static unsigned long find_rec_direct(unsigned long ip)
+unsigned long ftrace_find_rec_direct(unsigned long ip)
{
struct ftrace_func_entry *entry;
@@ -2380,7 +2380,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
{
unsigned long addr;
- addr = find_rec_direct(ip);
+ addr = ftrace_find_rec_direct(ip);
if (!addr)
return;
@@ -2393,11 +2393,6 @@ struct ftrace_ops direct_ops = {
| FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
| FTRACE_OPS_FL_PERMANENT,
};
-#else
-static inline unsigned long find_rec_direct(unsigned long ip)
-{
- return 0;
-}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/**
@@ -2417,7 +2412,7 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
if ((rec->flags & FTRACE_FL_DIRECT) &&
(ftrace_rec_count(rec) == 1)) {
- addr = find_rec_direct(rec->ip);
+ addr = ftrace_find_rec_direct(rec->ip);
if (addr)
return addr;
WARN_ON_ONCE(1);
@@ -2458,7 +2453,7 @@ unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
/* Direct calls take precedence over trampolines */
if (rec->flags & FTRACE_FL_DIRECT_EN) {
- addr = find_rec_direct(rec->ip);
+ addr = ftrace_find_rec_direct(rec->ip);
if (addr)
return addr;
WARN_ON_ONCE(1);
@@ -3604,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v)
if (rec->flags & FTRACE_FL_DIRECT) {
unsigned long direct;
- direct = find_rec_direct(rec->ip);
+ direct = ftrace_find_rec_direct(rec->ip);
if (direct)
seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
}
@@ -5008,7 +5003,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
mutex_lock(&direct_mutex);
/* See if there's a direct function at @ip already */
- if (find_rec_direct(ip))
+ if (ftrace_find_rec_direct(ip))
goto out_unlock;
ret = -ENODEV;
@@ -5027,7 +5022,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
if (ip != rec->ip) {
ip = rec->ip;
/* Need to check this ip for a direct. */
- if (find_rec_direct(ip))
+ if (ftrace_find_rec_direct(ip))
goto out_unlock;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4bf050fcfe3b..3f655371eaf6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5070,7 +5070,7 @@ static __init int test_ringbuffer(void)
int ret = 0;
if (security_locked_down(LOCKDOWN_TRACEFS)) {
- pr_warning("Lockdown is enabled, skipping ring buffer tests\n");
+ pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
return 0;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 23459d53d576..6c75410f9698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1889,7 +1889,7 @@ int __init register_tracer(struct tracer *type)
}
if (security_locked_down(LOCKDOWN_TRACEFS)) {
- pr_warning("Can not register tracer %s due to lockdown\n",
+ pr_warn("Can not register tracer %s due to lockdown\n",
type->name);
return -EPERM;
}
@@ -8796,7 +8796,7 @@ struct dentry *tracing_init_dentry(void)
struct trace_array *tr = &global_trace;
if (security_locked_down(LOCKDOWN_TRACEFS)) {
- pr_warning("Tracing disabled due to lockdown\n");
+ pr_warn("Tracing disabled due to lockdown\n");
return ERR_PTR(-EPERM);
}
@@ -9244,7 +9244,7 @@ __init static int tracer_alloc_buffers(void)
if (security_locked_down(LOCKDOWN_TRACEFS)) {
- pr_warning("Tracing disabled due to lockdown\n");
+ pr_warn("Tracing disabled due to lockdown\n");
return -EPERM;
}
diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index d43710718ee5..d45079ee62f8 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -17,12 +17,10 @@ static int
trace_inject_entry(struct trace_event_file *file, void *rec, int len)
{
struct trace_event_buffer fbuffer;
- struct ring_buffer *buffer;
int written = 0;
void *entry;
rcu_read_lock_sched();
- buffer = file->tr->trace_buffer.buffer;
entry = trace_event_buffer_reserve(&fbuffer, file, len);
if (entry) {
memcpy(entry, rec, len);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bc88fd939f4e..cfc923558e04 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4374,8 +4374,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
for_each_pwq(pwq, wq) {
spin_lock_irq(&pwq->pool->lock);
if (WARN_ON(pwq_busy(pwq))) {
- pr_warning("%s: %s has the following busy pwq\n",
- __func__, wq->name);
+ pr_warn("%s: %s has the following busy pwq\n",
+ __func__, wq->name);
show_pwq(pwq);
spin_unlock_irq(&pwq->pool->lock);
mutex_unlock(&wq->mutex);
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
for (i = 0; i < rep; ++i) {
tmp = $0
gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
+ gsub(/\$#/, n, tmp)
gsub(/\$\*/, "$", tmp)
print tmp
}
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 2cfdfbfbb2ed..bea6e43d45a0 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -523,7 +523,7 @@ int mrp_request_join(const struct net_device *dev,
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
- FIELD_SIZEOF(struct sk_buff, cb))
+ sizeof_field(struct sk_buff, cb))
return -ENOMEM;
spin_lock_bh(&app->lock);
@@ -548,7 +548,7 @@ void mrp_request_leave(const struct net_device *dev,
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
- FIELD_SIZEOF(struct sk_buff, cb))
+ sizeof_field(struct sk_buff, cb))
return;
spin_lock_bh(&app->lock);
@@ -692,7 +692,7 @@ static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
* advance to the next event in its Vector.
*/
if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
- FIELD_SIZEOF(struct sk_buff, cb))
+ sizeof_field(struct sk_buff, cb))
return -1;
if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen) < 0)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 4a89177def64..4811ec65bc43 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -548,7 +548,7 @@ static void batadv_recv_handler_init(void)
BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
- i = FIELD_SIZEOF(struct sk_buff, cb);
+ i = sizeof_field(struct sk_buff, cb);
BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
/* broadcast packet */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 915c2d6f7fb9..f79205d4444f 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -253,21 +253,21 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
/* priority is allowed */
if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
- FIELD_SIZEOF(struct __sk_buff, priority),
+ sizeof_field(struct __sk_buff, priority),
offsetof(struct __sk_buff, cb)))
return -EINVAL;
/* cb is allowed */
if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
- FIELD_SIZEOF(struct __sk_buff, cb),
+ sizeof_field(struct __sk_buff, cb),
offsetof(struct __sk_buff, tstamp)))
return -EINVAL;
/* tstamp is allowed */
if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
- FIELD_SIZEOF(struct __sk_buff, tstamp),
+ sizeof_field(struct __sk_buff, tstamp),
sizeof(struct __sk_buff)))
return -EINVAL;
@@ -438,7 +438,7 @@ static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
/* flags is allowed */
if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) +
- FIELD_SIZEOF(struct bpf_flow_keys, flags),
+ sizeof_field(struct bpf_flow_keys, flags),
sizeof(struct bpf_flow_keys)))
return -EINVAL;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 8a8f9e5f264f..b6fe30e3768f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -312,7 +312,7 @@ static int __init br_init(void)
{
int err;
- BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > sizeof_field(struct sk_buff, cb));
err = stp_proto_register(&br_stp_proto);
if (err < 0) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c277b8aba38..0ad39c87b7fd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10165,7 +10165,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
static int __net_init netdev_init(struct net *net)
{
BUILD_BUG_ON(GRO_HASH_BUCKETS >
- 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+ 8 * sizeof_field(struct napi_struct, gro_bitmask));
if (net != &init_net)
INIT_LIST_HEAD(&net->dev_base_head);
diff --git a/net/core/filter.c b/net/core/filter.c
index f1e703eed3d2..c19dd0973e0c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -274,7 +274,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
switch (skb_field) {
case SKF_AD_MARK:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
offsetof(struct sk_buff, mark));
@@ -289,14 +289,14 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
break;
case SKF_AD_QUEUE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, queue_mapping));
break;
case SKF_AD_VLAN_TAG:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
@@ -322,7 +322,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PROTOCOL:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);
/* A = *(u16 *) (CTX + offsetof(protocol)) */
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
@@ -338,8 +338,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+ BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
+ BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
BPF_REG_TMP, BPF_REG_CTX,
@@ -361,7 +361,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, hash));
@@ -385,7 +385,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_VLAN_TPID:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);
/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
@@ -5589,8 +5589,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
do { \
- BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \
- FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+ BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \
+ sizeof_field(struct bpf_tcp_sock, FIELD)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
si->dst_reg, si->src_reg, \
offsetof(struct tcp_sock, FIELD)); \
@@ -5598,9 +5598,9 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
#define BPF_INET_SOCK_GET_COMMON(FIELD) \
do { \
- BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock, \
+ BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \
FIELD) > \
- FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+ sizeof_field(struct bpf_tcp_sock, FIELD)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct inet_connection_sock, \
FIELD), \
@@ -5615,7 +5615,7 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
switch (si->off) {
case offsetof(struct bpf_tcp_sock, rtt_min):
- BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+ BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
sizeof(struct minmax));
BUILD_BUG_ON(sizeof(struct minmax) <
sizeof(struct minmax_sample));
@@ -5780,8 +5780,8 @@ u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
#define BPF_XDP_SOCK_GET(FIELD) \
do { \
- BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \
- FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \
+ BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \
+ sizeof_field(struct bpf_xdp_sock, FIELD)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
si->dst_reg, si->src_reg, \
offsetof(struct xdp_sock, FIELD)); \
@@ -7344,7 +7344,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, cb[0]) ...
offsetofend(struct __sk_buff, cb[4]) - 1:
- BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+ BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
offsetof(struct qdisc_skb_cb, data)) %
sizeof(__u64));
@@ -7363,7 +7363,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, tc_classid):
- BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
+ BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
off = si->off;
off -= offsetof(struct __sk_buff, tc_classid);
@@ -7434,7 +7434,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
#endif
break;
case offsetof(struct __sk_buff, family):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
si->dst_reg, si->src_reg,
@@ -7445,7 +7445,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
2, target_size));
break;
case offsetof(struct __sk_buff, remote_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
si->dst_reg, si->src_reg,
@@ -7456,7 +7456,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
4, target_size));
break;
case offsetof(struct __sk_buff, local_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_rcv_saddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
@@ -7470,7 +7470,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, remote_ip6[0]) ...
offsetof(struct __sk_buff, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_daddr.s6_addr32[0]) != 4);
off = si->off;
@@ -7490,7 +7490,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, local_ip6[0]) ...
offsetof(struct __sk_buff, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0]) != 4);
off = si->off;
@@ -7509,7 +7509,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, remote_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
si->dst_reg, si->src_reg,
@@ -7524,7 +7524,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, local_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
si->dst_reg, si->src_reg,
@@ -7535,7 +7535,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, tstamp):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_DW,
@@ -7573,7 +7573,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
target_size));
break;
case offsetof(struct __sk_buff, wire_len):
- BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+ BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
off = si->off;
off -= offsetof(struct __sk_buff, wire_len);
@@ -7603,7 +7603,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
switch (si->off) {
case offsetof(struct bpf_sock, bound_dev_if):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7614,7 +7614,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock, mark):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7625,7 +7625,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock, priority):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7641,7 +7641,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common,
skc_family,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_family),
target_size));
break;
@@ -7668,7 +7668,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(
BPF_SIZE(si->code), si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common, skc_rcv_saddr,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_rcv_saddr),
target_size));
break;
@@ -7677,7 +7677,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(
BPF_SIZE(si->code), si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common, skc_daddr,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_daddr),
target_size));
break;
@@ -7691,7 +7691,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(
struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0],
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0]),
target_size) + off);
#else
@@ -7708,7 +7708,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
BPF_SIZE(si->code), si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common,
skc_v6_daddr.s6_addr32[0],
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_v6_daddr.s6_addr32[0]),
target_size) + off);
#else
@@ -7722,7 +7722,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
BPF_FIELD_SIZEOF(struct sock_common, skc_num),
si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common, skc_num,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_num),
target_size));
break;
@@ -7732,7 +7732,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common, skc_dport,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_dport),
target_size));
break;
@@ -7742,7 +7742,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
BPF_FIELD_SIZEOF(struct sock_common, skc_state),
si->dst_reg, si->src_reg,
bpf_target_off(struct sock_common, skc_state,
- FIELD_SIZEOF(struct sock_common,
+ sizeof_field(struct sock_common,
skc_state),
target_size));
break;
@@ -7837,7 +7837,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
si->src_reg, offsetof(S, F)); \
*insn++ = BPF_LDX_MEM( \
SIZE, si->dst_reg, si->dst_reg, \
- bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
+ bpf_target_off(NS, NF, sizeof_field(NS, NF), \
target_size) \
+ OFF); \
} while (0)
@@ -7868,7 +7868,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
si->dst_reg, offsetof(S, F)); \
*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \
- bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
+ bpf_target_off(NS, NF, sizeof_field(NS, NF), \
target_size) \
+ OFF); \
*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
@@ -7930,8 +7930,8 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
*/
BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
offsetof(struct sockaddr_in6, sin6_port));
- BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
- FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
+ BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
+ sizeof_field(struct sockaddr_in6, sin6_port));
SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
struct sockaddr_in6, uaddr,
sin6_port, tmp_reg);
@@ -7997,8 +7997,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
+ sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
@@ -8031,8 +8031,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
int reg = BPF_REG_9; \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
+ sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
@@ -8073,12 +8073,12 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
switch (si->off) {
case offsetof(struct bpf_sock_ops, op) ...
offsetof(struct bpf_sock_ops, replylong[3]):
- BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
- FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
- BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
- FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
- BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
- FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
+ BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) !=
+ sizeof_field(struct bpf_sock_ops_kern, op));
+ BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
+ sizeof_field(struct bpf_sock_ops_kern, reply));
+ BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
+ sizeof_field(struct bpf_sock_ops_kern, replylong));
off = si->off;
off -= offsetof(struct bpf_sock_ops, op);
off += offsetof(struct bpf_sock_ops_kern, op);
@@ -8091,7 +8091,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, family):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
@@ -8102,7 +8102,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, remote_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
@@ -8113,7 +8113,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, local_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_rcv_saddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
@@ -8128,7 +8128,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_daddr.s6_addr32[0]) != 4);
off = si->off;
@@ -8149,7 +8149,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0]) != 4);
off = si->off;
@@ -8168,7 +8168,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, remote_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
@@ -8182,7 +8182,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, local_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
@@ -8202,7 +8202,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, state):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
@@ -8213,7 +8213,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock_ops, rtt_min):
- BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+ BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
sizeof(struct minmax));
BUILD_BUG_ON(sizeof(struct minmax) <
sizeof(struct minmax_sample));
@@ -8224,7 +8224,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
offsetof(struct bpf_sock_ops_kern, sk));
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct tcp_sock, rtt_min) +
- FIELD_SIZEOF(struct minmax_sample, t));
+ sizeof_field(struct minmax_sample, t));
break;
case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
@@ -8366,7 +8366,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
offsetof(struct sk_msg, data_end));
break;
case offsetof(struct sk_msg_md, family):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct sk_msg, sk),
@@ -8377,7 +8377,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_msg_md, remote_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct sk_msg, sk),
@@ -8388,7 +8388,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_msg_md, local_ip4):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_rcv_saddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
@@ -8403,7 +8403,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct sk_msg_md, remote_ip6[0]) ...
offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_daddr.s6_addr32[0]) != 4);
off = si->off;
@@ -8424,7 +8424,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct sk_msg_md, local_ip6[0]) ...
offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+ BUILD_BUG_ON(sizeof_field(struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0]) != 4);
off = si->off;
@@ -8443,7 +8443,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_msg_md, remote_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct sk_msg, sk),
@@ -8457,7 +8457,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_msg_md, local_port):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+ BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct sk_msg, sk),
@@ -8847,7 +8847,7 @@ sk_reuseport_is_valid_access(int off, int size,
/* Fields that allow narrowing */
case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
- if (size < FIELD_SIZEOF(struct sk_buff, protocol))
+ if (size < sizeof_field(struct sk_buff, protocol))
return false;
/* fall through */
case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
@@ -8865,7 +8865,7 @@ sk_reuseport_is_valid_access(int off, int size,
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
si->dst_reg, si->src_reg, \
bpf_target_off(struct sk_reuseport_kern, F, \
- FIELD_SIZEOF(struct sk_reuseport_kern, F), \
+ sizeof_field(struct sk_reuseport_kern, F), \
target_size)); \
})
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d524a693e00f..2dbbb030fbed 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -599,8 +599,8 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
offset += sizeof(struct gre_base_hdr);
if (hdr->flags & GRE_CSUM)
- offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +
- FIELD_SIZEOF(struct gre_full_hdr, reserved1);
+ offset += sizeof_field(struct gre_full_hdr, csum) +
+ sizeof_field(struct gre_full_hdr, reserved1);
if (hdr->flags & GRE_KEY) {
const __be32 *keyid;
@@ -622,11 +622,11 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
else
key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
}
- offset += FIELD_SIZEOF(struct gre_full_hdr, key);
+ offset += sizeof_field(struct gre_full_hdr, key);
}
if (hdr->flags & GRE_SEQ)
- offset += FIELD_SIZEOF(struct pptp_gre_header, seq);
+ offset += sizeof_field(struct pptp_gre_header, seq);
if (gre_ver == 0) {
if (*p_proto == htons(ETH_P_TEB)) {
@@ -653,7 +653,7 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
u8 *ppp_hdr;
if (hdr->flags & GRE_ACK)
- offset += FIELD_SIZEOF(struct pptp_gre_header, ack);
+ offset += sizeof_field(struct pptp_gre_header, ack);
ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
sizeof(_ppp_hdr),
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 7c8390ad4dc6..8310714c47fd 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -36,7 +36,7 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
const u32 *k = data;
const u32 key = *k;
- BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
+ BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
!= sizeof(u32));
/* Use cyclic increasing ID as direct hash key */
@@ -56,7 +56,7 @@ static const struct rhashtable_params mem_id_rht_params = {
.nelem_hint = 64,
.head_offset = offsetof(struct xdp_mem_allocator, node),
.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
- .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
+ .key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
.max_size = MEM_ID_MAX,
.min_size = 8,
.automatic_shrinking = true,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a52e8ba1ced0..4af8a98fe784 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1132,7 +1132,7 @@ static int __init dccp_init(void)
int rc;
BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
if (rc)
goto out_fail;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 572b6307a2df..8274f98c511c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1464,8 +1464,8 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_IKEY] = { .type = NLA_U32 },
[IFLA_GRE_OKEY] = { .type = NLA_U32 },
- [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
- [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
+ [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GRE_TTL] = { .type = NLA_U8 },
[IFLA_GRE_TOS] = { .type = NLA_U8 },
[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index cfb025606793..9b153c7fcbb4 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -580,8 +580,8 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_LINK] = { .type = NLA_U32 },
[IFLA_VTI_IKEY] = { .type = NLA_U32 },
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
- [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
- [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VTI_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
+ [IFLA_VTI_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8a39ee794891..3e50ac24fe41 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3949,7 +3949,7 @@ void __init tcp_init(void)
BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 923034c52ce4..9d0965252ddf 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -2170,8 +2170,8 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_IKEY] = { .type = NLA_U32 },
[IFLA_GRE_OKEY] = { .type = NLA_U32 },
- [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
- [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+ [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct ipv6hdr, saddr) },
+ [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct ipv6hdr, daddr) },
[IFLA_GRE_TTL] = { .type = NLA_U8 },
[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index ebb62a4ebe30..c4bdcbc84b07 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -50,7 +50,7 @@ static struct iucv_interface *pr_iucv;
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
-#define TRGCLS_SIZE FIELD_SIZEOF(struct iucv_message, class)
+#define TRGCLS_SIZE sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 062b73a83af0..c26a5663795e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -7595,7 +7595,7 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len)
return -EINVAL;
if (len == 0)
return -EINVAL;
- if (reg * NFT_REG32_SIZE + len > FIELD_SIZEOF(struct nft_regs, data))
+ if (reg * NFT_REG32_SIZE + len > sizeof_field(struct nft_regs, data))
return -ERANGE;
return 0;
@@ -7643,7 +7643,7 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
if (len == 0)
return -EINVAL;
if (reg * NFT_REG32_SIZE + len >
- FIELD_SIZEOF(struct nft_regs, data))
+ sizeof_field(struct nft_regs, data))
return -ERANGE;
if (data != NULL && type != NFT_DATA_VALUE)
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 7525063c25f5..de3a9596b7f1 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -236,7 +236,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
nla_strlcpy(helper->name,
tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
- if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
+ if (size > sizeof_field(struct nf_conn_help, data)) {
ret = -ENOMEM;
goto err2;
}
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 46ca8bcca1bd..faea72c2df32 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -440,12 +440,12 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
switch (ctx->family) {
case NFPROTO_IPV4:
- len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+ len = sizeof_field(struct nf_conntrack_tuple,
src.u3.ip);
break;
case NFPROTO_IPV6:
case NFPROTO_INET:
- len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+ len = sizeof_field(struct nf_conntrack_tuple,
src.u3.ip6);
break;
default:
@@ -457,20 +457,20 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
- len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip);
+ len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip);
break;
case NFT_CT_SRC_IP6:
case NFT_CT_DST_IP6:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
- len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip6);
+ len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip6);
break;
case NFT_CT_PROTO_SRC:
case NFT_CT_PROTO_DST:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
- len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u.all);
+ len = sizeof_field(struct nf_conntrack_tuple, src.u.all);
break;
case NFT_CT_BYTES:
case NFT_CT_PKTS:
@@ -551,7 +551,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
case NFT_CT_MARK:
if (tb[NFTA_CT_DIRECTION])
return -EINVAL;
- len = FIELD_SIZEOF(struct nf_conn, mark);
+ len = sizeof_field(struct nf_conn, mark);
break;
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 39dc94f2491e..bc9fd98c5d6d 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- u32 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+ u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
struct nft_masq *priv = nft_expr_priv(expr);
int err;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index c3c93e95b46e..8b44a4de5329 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -141,10 +141,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
switch (family) {
case NFPROTO_IPV4:
- alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip);
+ alen = sizeof_field(struct nf_nat_range, min_addr.ip);
break;
case NFPROTO_IPV6:
- alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
+ alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
break;
default:
return -EAFNOSUPPORT;
@@ -171,7 +171,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
}
}
- plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+ plen = sizeof_field(struct nf_nat_range, min_addr.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
priv->sreg_proto_min =
nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 43eeb1f609f1..5b779171565c 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
unsigned int plen;
int err;
- plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+ plen = sizeof_field(struct nf_nat_range, min_addr.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
priv->sreg_proto_min =
nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index f92a82c73880..4c33dfc9dab5 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -218,14 +218,14 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
switch (priv->family) {
case NFPROTO_IPV4:
- alen = FIELD_SIZEOF(union nf_inet_addr, in);
+ alen = sizeof_field(union nf_inet_addr, in);
err = nf_defrag_ipv4_enable(ctx->net);
if (err)
return err;
break;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
- alen = FIELD_SIZEOF(union nf_inet_addr, in6);
+ alen = sizeof_field(union nf_inet_addr, in6);
err = nf_defrag_ipv6_enable(ctx->net);
if (err)
return err;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 2236455b10a3..37253d399c6b 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -30,7 +30,7 @@ static unsigned int jhash_rnd __read_mostly;
static unsigned int xt_rateest_hash(const char *name)
{
- return jhash(name, FIELD_SIZEOF(struct xt_rateest, name), jhash_rnd) &
+ return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
(RATEEST_HSIZE - 1);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 90b2ab9dd449..4e31721e7293 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2755,7 +2755,7 @@ static int __init netlink_proto_init(void)
if (err != 0)
goto out;
- BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
if (!nl_table)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 1047e8043084..e3a37d22539c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2497,7 +2497,7 @@ static int __init dp_init(void)
{
int err;
- BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof_field(struct sk_buff, cb));
pr_info("Open vSwitch switching datapath\n");
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index fd8ed766bdd1..758a8c77f736 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -37,7 +37,7 @@ enum sw_flow_mac_proto {
* matching for small options.
*/
#define TUN_METADATA_OFFSET(opt_len) \
- (FIELD_SIZEOF(struct sw_flow_key, tun_opts) - opt_len)
+ (sizeof_field(struct sw_flow_key, tun_opts) - opt_len)
#define TUN_METADATA_OPTS(flow_key, opt_len) \
((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
@@ -52,7 +52,7 @@ struct vlan_head {
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
(offsetof(struct sw_flow_key, recirc_id) + \
- FIELD_SIZEOF(struct sw_flow_key, recirc_id))
+ sizeof_field(struct sw_flow_key, recirc_id))
struct ovs_key_nsh {
struct ovs_nsh_key_base base;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d72ddb67bb74..9d3c4d2d893a 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -972,7 +972,7 @@ static int __init af_rxrpc_init(void)
int ret = -1;
unsigned int tmp;
- BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
get_random_bytes(&tmp, sizeof(tmp));
tmp &= 0x3fffffff;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index bf2d69335d4b..f685c0d73708 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -312,7 +312,7 @@ static void tcf_ct_act_set_labels(struct nf_conn *ct,
u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
- size_t labels_sz = FIELD_SIZEOF(struct tcf_ct_params, labels);
+ size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);
if (!memchr_inv(labels_m, 0, labels_sz))
return;
@@ -936,7 +936,7 @@ static struct tc_action_ops act_ct_ops = {
static __net_init int ct_init_net(struct net *net)
{
- unsigned int n_bits = FIELD_SIZEOF(struct tcf_ct_params, labels) * 8;
+ unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
if (nf_connlabels_get(net, n_bits - 1)) {
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6c68971d99df..0d125de54285 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1481,7 +1481,7 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
}
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
-#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
+#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
#define FL_KEY_IS_MASKED(mask, member) \
memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
diff --git a/net/socket.c b/net/socket.c
index 4d38d49d6ad9..50623218747f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -957,7 +957,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
.msg_iocb = iocb};
ssize_t res;
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (iocb->ki_pos != 0)
@@ -982,7 +982,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_pos != 0)
return -ESPIPE;
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (sock->type == SOCK_SEQPACKET)
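The two net/socket.c hunks make sock_read_iter() and sock_write_iter() treat IOCB_NOWAIT like O_NONBLOCK by mapping it to MSG_DONTWAIT. A rough userspace sketch of one way this might be exercised, assuming the socket file accepts RWF_NOWAIT (which the VFS translates into IOCB_NOWAIT); if it does not, preadv2() is expected to fail with EOPNOTSUPP instead, so either way the call should return promptly rather than block:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	char buf[16];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;

	/* Nothing has been written by the peer, so with IOCB_NOWAIT honoured
	 * this should fail fast (EAGAIN) instead of waiting for data. */
	if (preadv2(sv[0], &iov, 1, -1, RWF_NOWAIT) < 0)
		perror("preadv2");
	return 0;
}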
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 7cfdce10de36..774babbee045 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2865,7 +2865,7 @@ static int __init af_unix_init(void)
{
int rc = -1;
- BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
rc = proto_register(&unix_proto, 1);
if (rc != 0) {
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 7cbe6e72e363..a63380c6b0d2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4125,15 +4125,6 @@ sub process {
"Prefer [subsystem eg: netdev]_$level2([subsystem]dev, ... then dev_$level2(dev, ... then pr_$level(... to printk(KERN_$orig ...\n" . $herecurr);
}
- if ($line =~ /\bpr_warning\s*\(/) {
- if (WARN("PREFER_PR_LEVEL",
- "Prefer pr_warn(... to pr_warning(...\n" . $herecurr) &&
- $fix) {
- $fixed[$fixlinenr] =~
- s/\bpr_warning\b/pr_warn/;
- }
- }
-
if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
my $orig = $1;
my $level = lc($orig);
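This checkpatch.pl hunk drops the rule that steered authors from pr_warning() to pr_warn(); with the pr_warning() conversions elsewhere in this merge (ring_buffer.c, trace.c, workqueue.c) the old spelling is being retired, so there is nothing left to flag. An illustrative userspace model (not the kernel headers) of why the two spellings were interchangeable, both expanding to printk(KERN_WARNING ...):

#include <stdio.h>

#define KERN_WARNING "<4>"
#define printk(fmt, ...)  fprintf(stderr, fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...) printk(KERN_WARNING fmt, ##__VA_ARGS__)

int main(void)
{
	/* Emits the message with the warning level prefix, as pr_warning()
	 * would have done before its removal. */
	pr_warn("%s: %s has the following busy pwq\n",
		"destroy_workqueue", "example_wq");
	return 0;
}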
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index f19a895ad7cd..ef8dfd47c7e3 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -45,7 +45,7 @@
#define DONT_HASH 0x0200
#define INVALID_PCR(a) (((a) < 0) || \
- (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
+ (a) >= (sizeof_field(struct integrity_iint_cache, measured_pcrs) * 8))
int ima_policy_flag;
static int temp_ima_appraise;
@@ -274,7 +274,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
* lsm rules can change
*/
memcpy(nentry, entry, sizeof(*nentry));
- memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
+ memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
for (i = 0; i < MAX_LSM_RULES; i++) {
if (!entry->lsm[i].rule)
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index 4e3bd9a2bec0..bd91c6ecb112 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -247,7 +247,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&ff->mutex);
}
- return 0;
+ return err;
}
static int pcm_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index 349b4d09e84f..005970931030 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -177,18 +177,14 @@ static int pcm_open(struct snd_pcm_substream *substream)
err = snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
frames_per_period, frames_per_period);
- if (err < 0) {
- mutex_unlock(&motu->mutex);
+ if (err < 0)
goto err_locked;
- }
err = snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
frames_per_buffer, frames_per_buffer);
- if (err < 0) {
- mutex_unlock(&motu->mutex);
+ if (err < 0)
goto err_locked;
- }
}
}
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9124603edabe..67fd3e844dd6 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -285,7 +285,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&oxfw->mutex);
}
- return 0;
+ return err;
}
static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 50d4a87a6bb3..f02f5b1568de 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -635,36 +635,30 @@ This function assumes there are no more than 16 in/out busses or pipes
Meters is an array [3][16][2] of long. */
static void get_audio_meters(struct echoaudio *chip, long *meters)
{
- int i, m, n;
+ unsigned int i, m, n;
- m = 0;
- n = 0;
- for (i = 0; i < num_busses_out(chip); i++, m++) {
+ for (i = 0 ; i < 96; i++)
+ meters[i] = 0;
+
+ for (m = 0, n = 0, i = 0; i < num_busses_out(chip); i++, m++) {
meters[n++] = chip->comm_page->vu_meter[m];
meters[n++] = chip->comm_page->peak_meter[m];
}
- for (; n < 32; n++)
- meters[n] = 0;
#ifdef ECHOCARD_ECHO3G
m = E3G_MAX_OUTPUTS; /* Skip unused meters */
#endif
- for (i = 0; i < num_busses_in(chip); i++, m++) {
+ for (n = 32, i = 0; i < num_busses_in(chip); i++, m++) {
meters[n++] = chip->comm_page->vu_meter[m];
meters[n++] = chip->comm_page->peak_meter[m];
}
- for (; n < 64; n++)
- meters[n] = 0;
-
#ifdef ECHOCARD_HAS_VMIXER
- for (i = 0; i < num_pipes_out(chip); i++, m++) {
+ for (n = 64, i = 0; i < num_pipes_out(chip); i++, m++) {
meters[n++] = chip->comm_page->vu_meter[m];
meters[n++] = chip->comm_page->peak_meter[m];
}
#endif
- for (; n < 96; n++)
- meters[n] = 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 35b4526f0d28..b856b89378ac 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1419,7 +1419,6 @@ static bool atpx_present(void)
return true;
}
}
- pci_dev_put(pdev);
}
return false;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6d6e34b3b3aa..dbfafee97931 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7643,11 +7643,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1a, 0x90a70130},
{0x1b, 0x90170110},
{0x21, 0x03211020}),
- SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
- {0x12, 0xb7a60130},
- {0x13, 0xb8a61140},
- {0x16, 0x90170110},
- {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x14, 0x90170110},
@@ -7841,6 +7836,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x19, 0x40000000},
{0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
{}
};
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index f8b5b960e597..4eaa2b5b20a5 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -292,7 +292,7 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- uinfo->count = FIELD_SIZEOF(struct hdmi_codec_priv, eld);
+ uinfo->count = sizeof_field(struct hdmi_codec_priv, eld);
return 0;
}