author    Jason Gunthorpe <jgg@mellanox.com>    2020-01-21 09:55:04 -0400
committer Jason Gunthorpe <jgg@mellanox.com>    2020-01-21 09:55:04 -0400
commit    e8b3a426fb4a9e2856a69b6e19de044c7416c316 (patch)
tree      e902f402349a14df8a733e8d4f417122673f6c24
parent    IB/mlx4: Fix memory leak in add_gid error flow (diff)
parent    net/rds: Use prefetch for On-Demand-Paging MR (diff)
Merge tag 'rds-odp-for-5.5' into rdma.git for-next
From https://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma

Leon Romanovsky says:

====================
Use ODP MRs for kernel ULPs

The following series extends the MR creation routines to allow creation of
user MRs through kernel ULPs acting as a proxy. The immediate use case is to
allow RDS to work over FS-DAX, which requires ODP (on-demand-paging) MRs to
be created, and such MRs could not be created prior to this series.

The first part of this patchset extends RDMA with a special verb,
ib_reg_user_mr(). The common use case for this function is a userspace
application that allocates memory for HCA access, while the responsibility
for registering the memory with the HCA lies with a kernel ULP. This ULP
acts as an agent for the userspace application.

The second part provides advise-MR functionality for ULPs. This is an
integral part of the ODP flows and is used to trigger pagefaults in advance,
so that memory is prepared before the working set is run.

The third part is the actual user of these in-kernel APIs.
====================

* tag 'rds-odp-for-5.5':
  net/rds: Use prefetch for On-Demand-Paging MR
  net/rds: Handle ODP mr registration/unregistration
  net/rds: Detect need of On-Demand-Paging memory registration
  RDMA/mlx5: Fix handling of IOVA != user_va in ODP paths
  IB/mlx5: Mask out unsupported ODP capabilities for kernel QPs
  RDMA/mlx5: Don't fake udata for kernel path
  IB/mlx5: Add ODP WQE handlers for kernel QPs
  IB/core: Add interface to advise_mr for kernel users
  IB/core: Introduce ib_reg_user_mr
  IB: Allow calls to ib_umem_get from kernel ULPs

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
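As a rough illustration of how a kernel ULP could use the two new verbs, the
sketch below registers a userspace buffer on the application's behalf and then
prefetches its pages. The ib_reg_user_mr() and ib_advise_mr() signatures are
the ones added by this series; the protection domain "pd", the buffer
"uaddr"/"len", and the access flags chosen here are illustrative assumptions,
not code from the patches themselves.

	/*
	 * Illustrative sketch only: a kernel ULP registering a userspace
	 * buffer (uaddr/len, owned by the current process) against an
	 * existing protection domain "pd" as an ODP MR, then prefetching
	 * its pages via the advise-MR interface.
	 */
	struct ib_mr *mr;
	struct ib_sge sge;
	int ret;

	mr = ib_reg_user_mr(pd, uaddr, len, uaddr,
			    IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_ON_DEMAND);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Fault the working set in ahead of time (the advise-MR part). */
	sge.addr   = uaddr;
	sge.length = len;
	sge.lkey   = mr->lkey;
	ret = ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			   IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);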
-rw-r--r--Documentation/dev-tools/kcov.rst10
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-at91.txt6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml4
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt2
-rw-r--r--Documentation/networking/dsa/sja1105.rst6
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rw-r--r--Documentation/networking/netdev-FAQ.rst4
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/riscv/index.rst1
-rw-r--r--Documentation/riscv/patch-acceptance.rst35
-rw-r--r--MAINTAINERS16
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/entry-arcv2.h8
-rw-r--r--arch/arc/include/asm/hugepage.h1
-rw-r--r--arch/arc/kernel/asm-offsets.c10
-rw-r--r--arch/arc/plat-eznps/Kconfig2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/kernel/process.c6
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h5
-rw-r--r--arch/arm64/include/asm/pgtable.h10
-rw-r--r--arch/arm64/include/asm/unistd.h1
-rw-r--r--arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm64/kernel/process.c10
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/arm64/mm/mmu.c4
-rw-r--r--arch/hexagon/include/asm/atomic.h8
-rw-r--r--arch/hexagon/include/asm/bitops.h8
-rw-r--r--arch/hexagon/include/asm/cmpxchg.h2
-rw-r--r--arch/hexagon/include/asm/futex.h6
-rw-r--r--arch/hexagon/include/asm/io.h1
-rw-r--r--arch/hexagon/include/asm/spinlock.h20
-rw-r--r--arch/hexagon/kernel/stacktrace.c4
-rw-r--r--arch/hexagon/kernel/vm_entry.S2
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/boot/compressed/Makefile3
-rw-r--r--arch/mips/include/asm/cpu-type.h3
-rw-r--r--arch/mips/include/asm/thread_info.h20
-rw-r--r--arch/mips/include/asm/vdso/gettimeofday.h13
-rw-r--r--arch/mips/kernel/cacheinfo.c27
-rw-r--r--arch/mips/net/ebpf_jit.c2
-rw-r--r--arch/mips/vdso/vgettimeofday.c20
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/kernel/process.c8
-rw-r--r--arch/powerpc/include/asm/spinlock.h1
-rw-r--r--arch/powerpc/mm/mem.c3
-rw-r--r--arch/powerpc/mm/slice.c4
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi15
-rw-r--r--arch/riscv/include/asm/csr.h18
-rw-r--r--arch/riscv/kernel/ftrace.c2
-rw-r--r--arch/riscv/kernel/head.S2
-rw-r--r--arch/riscv/kernel/irq.c6
-rw-r--r--arch/riscv/kernel/process.c6
-rw-r--r--arch/riscv/mm/init.c12
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/sh/mm/init.c4
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/include/asm/ptrace-generic.h2
-rw-r--r--arch/um/kernel/process.c6
-rw-r--r--arch/x86/mm/init_32.c4
-rw-r--r--arch/x86/mm/init_64.c4
-rw-r--r--arch/x86/um/tls_32.c6
-rw-r--r--arch/x86/um/tls_64.c7
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/kernel/process.c8
-rw-r--r--block/bio.c49
-rw-r--r--block/blk-merge.c18
-rw-r--r--drivers/atm/eni.c4
-rw-r--r--drivers/block/null_blk_zoned.c5
-rw-r--r--drivers/char/agp/isoch.c9
-rw-r--r--drivers/char/tpm/tpm-dev-common.c2
-rw-r--r--drivers/char/tpm/tpm-dev.h2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c34
-rw-r--r--drivers/clocksource/timer-riscv.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/dma/dma-jz4780.c3
-rw-r--r--drivers/dma/ioat/dma.c3
-rw-r--r--drivers/dma/k3dma.c12
-rw-r--r--drivers/dma/virt-dma.c3
-rw-r--r--drivers/edac/sifive_edac.c2
-rw-r--r--drivers/firmware/broadcom/tee_bnxt_fw.c1
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-mockup.c4
-rw-r--r--drivers/gpio/gpio-zynq.c8
-rw-r--r--drivers/gpio/gpiolib-acpi.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c31
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c67
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c15
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c16
-rw-r--r--drivers/hid/hid-ite.c3
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-steam.c4
-rw-r--r--drivers/hid/hidraw.c7
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c16
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c2
-rw-r--r--drivers/hid/uhid.c5
-rw-r--r--drivers/hid/usbhid/hiddev.c97
-rw-r--r--drivers/hid/wacom_wac.c6
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c17
-rw-r--r--drivers/i2c/i2c-core-base.c13
-rw-r--r--drivers/infiniband/core/umem.c27
-rw-r--r--drivers/infiniband/core/umem_odp.c29
-rw-r--r--drivers/infiniband/core/verbs.c41
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c4
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c19
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c51
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h12
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c20
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c33
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c167
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c9
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/input/evdev.c14
-rw-r--r--drivers/input/input.c26
-rw-r--r--drivers/input/keyboard/imx_sc_key.c8
-rw-r--r--drivers/input/misc/uinput.c19
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/iommu/intel-iommu.c22
-rw-r--r--drivers/iommu/iommu.c1
-rw-r--r--drivers/irqchip/irq-sifive-plic.c2
-rw-r--r--drivers/media/cec/cec-adap.c40
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c17
-rw-r--r--drivers/mtd/nand/onenand/omap2.c14
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c14
-rw-r--r--drivers/mtd/nand/onenand/samsung_mtd.c8
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c13
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c38
-rw-r--r--drivers/mtd/sm_ftl.c3
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1
-rw-r--r--drivers/net/can/m_can/tcan4x5x.c63
-rw-r--r--drivers/net/can/mscan/mscan.c21
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c2
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c12
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c18
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c9
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/aquantia_main.c2
-rw-r--r--drivers/net/phy/phylink.c3
-rw-r--r--drivers/net/usb/lan78xx.c9
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/nvme/target/admin-cmd.c12
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c128
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c4
-rw-r--r--drivers/pinctrl/cirrus/Kconfig1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c1
-rw-r--r--drivers/platform/mips/Kconfig2
-rw-r--r--drivers/powercap/intel_rapl_common.c3
-rw-r--r--drivers/ptp/ptp_clock.c31
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/regulator/axp20x-regulator.c11
-rw-r--r--drivers/regulator/bd70528-regulator.c1
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c15
-rw-r--r--drivers/rtc/rtc-mt6397.c39
-rw-r--r--drivers/rtc/rtc-sun6i.c16
-rw-r--r--drivers/s390/net/qeth_core_main.c29
-rw-r--r--drivers/s390/net/qeth_l2_main.c10
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_sys.c40
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c2
-rw-r--r--drivers/spi/spi-dw.c15
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-fsl-dspi.c24
-rw-r--r--drivers/spi/spi-uniphier.c31
-rw-r--r--drivers/spi/spi.c22
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c4
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/vt6656/baseband.c4
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/device.h1
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/usbpipe.c25
-rw-r--r--drivers/staging/vt6656/usbpipe.h5
-rw-r--r--drivers/staging/vt6656/wcmd.c1
-rw-r--r--drivers/thermal/qcom/tsens.c3
-rw-r--r--drivers/tty/serdev/core.c10
-rw-r--r--drivers/tty/tty_port.c3
-rw-r--r--drivers/usb/cdns3/gadget.c14
-rw-r--r--drivers/usb/chipidea/host.c4
-rw-r--r--drivers/usb/core/config.c82
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/dwc3/gadget.c7
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/host/ohci-da8xx.c8
-rw-r--r--drivers/usb/musb/jz4740.c7
-rw-r--r--drivers/usb/musb/musb_core.c11
-rw-r--r--drivers/usb/musb/musbhsdma.c2
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/serial/usb-wwan.h1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c20
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h18
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c4
-rw-r--r--drivers/watchdog/rn5t618_wdt.c1
-rw-r--r--drivers/watchdog/w83627hf_wdt.c2
-rw-r--r--fs/btrfs/compression.c7
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/buffer.c33
-rw-r--r--fs/char_dev.c2
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/file.c7
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/internal.h2
-rw-r--r--fs/io_uring.c12
-rw-r--r--fs/mpage.c2
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nsfs.c3
-rw-r--r--fs/ocfs2/dlmglue.c1
-rw-r--r--fs/ocfs2/journal.c8
-rw-r--r--fs/posix_acl.c7
-rw-r--r--fs/pstore/ram.c13
-rw-r--r--fs/pstore/ram_core.c2
-rw-r--r--include/linux/bio.h1
-rw-r--r--include/linux/bvec.h22
-rw-r--r--include/linux/can/dev.h34
-rw-r--r--include/linux/dmaengine.h5
-rw-r--r--include/linux/if_ether.h8
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/memory_hotplug.h7
-rw-r--r--include/linux/mfd/mt6397/rtc.h8
-rw-r--r--include/linux/mtd/flashchip.h2
-rw-r--r--include/linux/of_mdio.h2
-rw-r--r--include/linux/posix-clock.h19
-rw-r--r--include/linux/spi/spi.h4
-rw-r--r--include/linux/sxgbe_platform.h2
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/net/dst.h13
-rw-r--r--include/net/dst_ops.h3
-rw-r--r--include/net/netfilter/nf_flow_table.h6
-rw-r--r--include/net/sch_generic.h5
-rw-r--r--include/rdma/ib_umem.h4
-rw-r--r--include/rdma/ib_umem_odp.h6
-rw-r--r--include/rdma/ib_verbs.h9
-rw-r--r--include/soc/sifive/sifive_l2_cache.h (renamed from arch/riscv/include/asm/sifive_l2_cache.h)6
-rw-r--r--include/trace/events/preemptirq.h8
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/kcov.h10
-rw-r--r--init/main.c26
-rw-r--r--kernel/bpf/cgroup.c11
-rw-r--r--kernel/bpf/verifier.c52
-rw-r--r--kernel/cred.c6
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/fork.c10
-rw-r--r--kernel/seccomp.c7
-rw-r--r--kernel/taskstats.c30
-rw-r--r--kernel/time/posix-clock.c31
-rw-r--r--kernel/trace/fgraph.c14
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace_events_inject.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c4
-rw-r--r--kernel/trace/trace_seq.c2
-rw-r--r--kernel/trace/trace_stack.c5
-rw-r--r--mm/gup_benchmark.c8
-rw-r--r--mm/hugetlb.c51
-rw-r--r--mm/memory_hotplug.c31
-rw-r--r--mm/memremap.c2
-rw-r--r--mm/migrate.c23
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/zsmalloc.c5
-rw-r--r--net/8021q/vlan.h1
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/8021q/vlan_netlink.c19
-rw-r--r--net/bridge/br_nf_core.c3
-rw-r--r--net/bridge/netfilter/ebtables.c33
-rw-r--r--net/decnet/dn_route.c6
-rw-r--r--net/hsr/hsr_debugfs.c52
-rw-r--r--net/hsr/hsr_device.c28
-rw-r--r--net/hsr/hsr_framereg.c73
-rw-r--r--net/hsr/hsr_framereg.h6
-rw-r--r--net/hsr/hsr_main.c7
-rw-r--r--net/hsr/hsr_main.h22
-rw-r--r--net/hsr/hsr_netlink.c1
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_tunnel.c2
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c27
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_policy.c5
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/route.c22
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/xfrm6_policy.c5
-rw-r--r--net/netfilter/ipset/ip_set_core.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c3
-rw-r--r--net/netfilter/nf_flow_table_core.c7
-rw-r--r--net/netfilter/nf_flow_table_ip.c4
-rw-r--r--net/netfilter/nf_flow_table_offload.c52
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/nft_flow_offload.c3
-rw-r--r--net/netfilter/nft_tproxy.c4
-rw-r--r--net/qrtr/qrtr.c2
-rw-r--r--net/rds/ib.c7
-rw-r--r--net/rds/ib.h3
-rw-r--r--net/rds/ib_mr.h7
-rw-r--r--net/rds/ib_rdma.c84
-rw-r--r--net/rds/ib_send.c44
-rw-r--r--net/rds/rdma.c157
-rw-r--r--net/rds/rds.h13
-rw-r--r--net/rxrpc/ar-internal.h10
-rw-r--r--net/rxrpc/call_accept.c60
-rw-r--r--net/rxrpc/conn_event.c16
-rw-r--r--net/rxrpc/conn_service.c4
-rw-r--r--net/rxrpc/input.c18
-rw-r--r--net/rxrpc/rxkad.c5
-rw-r--r--net/rxrpc/security.c70
-rw-r--r--net/sched/act_mirred.c22
-rw-r--r--net/sched/cls_api.c31
-rw-r--r--net/sched/cls_flower.c12
-rw-r--r--net/sched/cls_u32.c25
-rw-r--r--net/sched/sch_cake.c2
-rw-r--r--net/sched/sch_fq.c23
-rw-r--r--net/sched/sch_prio.c10
-rw-r--r--net/sctp/sm_sideeffect.c28
-rw-r--r--net/sctp/stream.c30
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/netlink_compat.c4
-rw-r--r--net/tipc/socket.c57
-rw-r--r--samples/seccomp/user-trap.c4
-rw-r--r--scripts/gcc-plugins/Kconfig9
-rwxr-xr-xscripts/package/mkdebian2
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/domain.c82
-rw-r--r--security/apparmor/file.c12
-rw-r--r--security/apparmor/mount.c2
-rw-r--r--security/apparmor/policy.c4
-rw-r--r--security/tomoyo/common.c9
-rw-r--r--security/tomoyo/domain.c15
-rw-r--r--security/tomoyo/group.c9
-rw-r--r--security/tomoyo/realpath.c32
-rw-r--r--security/tomoyo/util.c6
-rw-r--r--sound/hda/hdac_regmap.c1
-rw-r--r--sound/pci/hda/hda_intel.c21
-rw-r--r--sound/pci/hda/patch_realtek.c49
-rw-r--r--sound/pci/ice1712/ice1724.c9
-rw-r--r--sound/soc/fsl/fsl_audmix.c9
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c1
-rw-r--r--sound/soc/soc-core.c14
-rw-r--r--sound/soc/soc-topology.c6
-rw-r--r--sound/soc/sof/imx/imx8.c5
-rw-r--r--sound/soc/sof/intel/hda-dai.c11
-rw-r--r--sound/soc/sof/ipc.c3
-rw-r--r--sound/soc/stm/stm32_spdifrx.c40
-rw-r--r--sound/usb/card.h1
-rw-r--r--sound/usb/pcm.c25
-rw-r--r--sound/usb/quirks-table.h3
-rw-r--r--sound/usb/quirks.c12
-rw-r--r--sound/usb/usbaudio.h3
-rw-r--r--tools/lib/bpf/Makefile15
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile6
-rwxr-xr-xtools/testing/selftests/net/forwarding/loopback.sh8
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh39
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c15
-rwxr-xr-xusr/gen_initramfs_list.sh2
472 files changed, 3512 insertions, 2037 deletions
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 36890b026e77..1c4e1825d769 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -251,11 +251,11 @@ selectively from different subsystems.
.. code-block:: c
struct kcov_remote_arg {
- unsigned trace_mode;
- unsigned area_size;
- unsigned num_handles;
- uint64_t common_handle;
- uint64_t handles[0];
+ __u32 trace_mode;
+ __u32 area_size;
+ __u32 num_handles;
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
};
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
index 2210f4359c45..8347b1e7c080 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
@@ -18,8 +18,10 @@ Optional properties:
- dma-names: should contain "tx" and "rx".
- atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
capable I2C controllers.
-- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c"
- and "atmel,sama5d2-i2c".
+- i2c-sda-hold-time-ns: TWD hold time, only available for:
+ "atmel,sama5d4-i2c",
+ "atmel,sama5d2-i2c",
+ "microchip,sam9x60-i2c".
- Child nodes conforming to i2c bus binding
Examples :
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 732339275848..1e0ca6ccf64b 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -111,7 +111,7 @@ patternProperties:
spi-rx-bus-width:
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4 ]
+ - enum: [ 1, 2, 4, 8 ]
- default: 1
description:
Bus width to the SPI bus used for MISO.
@@ -123,7 +123,7 @@ patternProperties:
spi-tx-bus-width:
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4 ]
+ - enum: [ 1, 2, 4, 8 ]
- default: 1
description:
Bus width to the SPI bus used for MOSI.
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 059d58a549c7..6fb2b0671994 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -23,7 +23,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | ok |
| sparc: | TODO |
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index eef20d0bcf7c..64553d8d91cb 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules
against this restriction and errors out when appropriate. Schedule analysis is
needed to avoid this, which is outside the scope of the document.
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away. It also means it
-is more difficult to phase-align the scheduler with the other devices in the
-network.
-
Device Tree bindings and board design
=====================================
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fd26788e8c96..48ccb1b31160 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER
with the current initial RTO of 1second. With this the final timeout
for a passive TCP connection will happen after 63seconds.
-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
Send out syncookies when the syn backlog queue of a socket
overflows. This is to prevent against the common 'SYN flood attack'
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 642fa963be3c..d5c9320901c3 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the
mainline tree from Linus, and ``net-next`` is where the new code goes
for the future release. You can find the trees here:
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
----------------------------------------------------------------------------
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 21aa7d5358e6..6399d92f0b21 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -60,6 +60,7 @@ lack of a better place.
volatile-considered-harmful
botching-up-ioctls
clang-format
+ ../riscv/patch-acceptance
.. only:: subproject and html
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index 215fd3c1f2d5..fa33bffd8992 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -7,6 +7,7 @@ RISC-V architecture
boot-image-header
pmu
+ patch-acceptance
.. only:: subproject and html
diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst
new file mode 100644
index 000000000000..dfe0ac5624fb
--- /dev/null
+++ b/Documentation/riscv/patch-acceptance.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations. New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts. This flexibility can present a
+challenge for RISC-V Linux maintenance. Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code. We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions. These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation. To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only to accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors, may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
diff --git a/MAINTAINERS b/MAINTAINERS
index e09bd92a1e44..4017e6b760be 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -771,6 +771,8 @@ F: drivers/thermal/thermal_mmio.c
AMAZON ETHERNET DRIVERS
M: Netanel Belgazal <netanel@amazon.com>
+M: Arthur Kiyanovski <akiyano@amazon.com>
+R: Guy Tzalik <gtzalik@amazon.com>
R: Saeed Bishara <saeedb@amazon.com>
R: Zorik Machulsky <zorik@amazon.com>
L: netdev@vger.kernel.org
@@ -11458,8 +11460,8 @@ M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
S: Odd Fixes
F: Documentation/devicetree/bindings/net/
F: drivers/net/
@@ -11500,8 +11502,8 @@ M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
B: mailto:netdev@vger.kernel.org
S: Maintained
F: net/
@@ -11546,7 +11548,7 @@ M: "David S. Miller" <davem@davemloft.net>
M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
L: netdev@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
S: Maintained
F: net/ipv4/
F: net/ipv6/
@@ -13677,7 +13679,6 @@ F: drivers/net/ethernet/qualcomm/emac/
QUALCOMM ETHQOS ETHERNET DRIVER
M: Vinod Koul <vkoul@kernel.org>
-M: Niklas Cassel <niklas.cassel@linaro.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -14119,6 +14120,7 @@ M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
L: linux-riscv@lists.infradead.org
+P: Documentation/riscv/patch-acceptance.rst
T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
S: Supported
F: arch/riscv/
@@ -14546,8 +14548,6 @@ F: include/linux/platform_data/spi-s3c64xx.h
SAMSUNG SXGBE DRIVERS
M: Byungho An <bh74.an@samsung.com>
-M: Girish K S <ks.giri@samsung.com>
-M: Vipul Pandya <vipul.pandya@samsung.com>
S: Supported
L: netdev@vger.kernel.org
F: drivers/net/ethernet/samsung/sxgbe/
diff --git a/Makefile b/Makefile
index b99d95df8075..0a7c37dcc5ac 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 41b16f21beec..0b8b63d0bec1 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -162,7 +162,7 @@
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
- ST2 r58, r59, PT_sp + 12
+ ST2 r58, r59, PT_r58
#endif
.endm
@@ -172,8 +172,8 @@
LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
- ld r12, [sp, PT_sp + 4]
- ld r30, [sp, PT_sp + 8]
+ ld r12, [sp, PT_r12]
+ ld r30, [sp, PT_r30]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
@@ -190,7 +190,7 @@
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
- LD2 r58, r59, PT_sp + 12
+ LD2 r58, r59, PT_r58
#endif
.endm
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 9a74ce71a767..30ac40fed2c5 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -8,7 +8,6 @@
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index 1f621e416521..c783bcd35eb8 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -66,7 +66,15 @@ int main(void)
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
- DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+ OFFSET(PT_r12, pt_regs, r12);
+ OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ OFFSET(PT_r58, pt_regs, r58);
+ OFFSET(PT_r59, pt_regs, r59);
+#endif
return 0;
}
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
index a376a50d3fea..a931d0a256d0 100644
--- a/arch/arc/plat-eznps/Kconfig
+++ b/arch/arc/plat-eznps/Kconfig
@@ -7,7 +7,7 @@
menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform"
select CPU_BIG_ENDIAN
- select CLKSRC_NPS
+ select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC
select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba75e3661a41..96dab76da3b3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -72,6 +72,7 @@ config ARM
select HAVE_ARM_SMCCC if CPU_V7
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index cea1c27c29cb..46e478fb5ea2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task)
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int
-copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
@@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
- thread->tp_value[0] = childregs->ARM_r3;
+ thread->tp_value[0] = tls;
thread->tp_value[1] = get_tpuser();
thread_notify(THREAD_NOTIFY_COPY, thread);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1b4476ddb83..e688dfad0b72 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -138,6 +138,7 @@ config ARM64
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 8dc6c5cdabe6..baf52baaa2a5 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -85,13 +85,12 @@
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_EXECONLY
+#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
@@ -100,7 +99,7 @@
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECONLY
+#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5d15b4735a0e..cd5de0e40bfa 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
/*
* p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
*/
#define pte_access_permitted(pte, write) \
(pte_valid_user(pte) && (!(write) || pte_write(pte)))
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 2629a68b8724..5af82587909e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -42,7 +42,6 @@
#endif
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#ifndef __COMPAT_SYSCALL_NR
#include <uapi/asm/unistd.h>
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 4703d218663a..f83a70e07df8 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -19,5 +19,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 71f788cd2b18..d54586d5b031 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
@@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
/*
- * If a TLS pointer was passed to clone (4th argument), use it
- * for the new thread.
+ * If a TLS pointer was passed to clone, use it for the new
+ * thread.
*/
if (clone_flags & CLONE_SETTLS)
- p->thread.uw.tp_value = childregs->regs[3];
+ p->thread.uw.tp_value = tls;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 077b02a2d4d3..85566d32958f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault, major = 0;
- unsigned long vm_flags = VM_READ | VM_WRITE;
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (kprobe_page_fault(regs, esr))
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5a3b15a14a7f..40797cbfba2d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
/*
* FIXME: Cleanup page tables (also in arch_add_memory() in case
@@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
* unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
* unlocked yet.
*/
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
}
#endif
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 12cd9231c4b8..0231d69c8bf2 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%2);\n" \
" %1 = "#op "(%0,%3);\n" \
" memw_locked(%2,P3)=%1;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output), "=&r" (val) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
" }"
" memw_locked(%2, p3) = %1;"
" {"
- " if !p3 jump 1b;"
+ " if (!p3) jump 1b;"
" }"
"2:"
: "=&r" (__oldval), "=&r" (tmp)
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 47384b094b94..71429f756af0 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
int r;
asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
- "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
: "=&r" (r)
: "r" (x)
: "p0");
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 6091322c3af9..92b8a02e588a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n" /* load into retval */
" memw_locked(%1,P0) = %2;\n" /* store into memory */
- " if !P0 jump 1b;\n"
+ " if (!P0) jump 1b;\n"
: "=&r" (retval)
: "r" (ptr), "r" (x)
: "memory", "p0"
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index cb635216a732..0191f7c7193e 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -16,7 +16,7 @@
/* For example: %1 = %4 */ \
insn \
"2: memw_locked(%3,p2) = %1;\n" \
- " if !p2 jump 1b;\n" \
+ " if (!p2) jump 1b;\n" \
" %1 = #0;\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
"1: %1 = memw_locked(%3)\n"
" {\n"
" p2 = cmp.eq(%1,%4)\n"
- " if !p2.new jump:NT 3f\n"
+ " if (!p2.new) jump:NT 3f\n"
" }\n"
"2: memw_locked(%3,p2) = %5\n"
- " if !p2 jump 1b\n"
+ " if (!p2) jump 1b\n"
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 539e3efcf39c..b0dbc3473172 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)
void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
#define __raw_writel writel
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index bfe07d842ff3..ef103b73bec8 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
"1: R6 = memw_locked(%0);\n"
" R6 = add(R6,#-1);\n"
" memw_locked(%0,P3) = R6\n"
- " if !P3 jump 1b;\n"
+ " if (!P3) jump 1b;\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" { %0 = P3 }\n"
"1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0)\n"
" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1)\n"
" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1b; R6 = #1; }\n"
+ " { if (!P3) jump 1b; R6 = #1; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
index 35f29423fda8..5ed02f699479 100644
--- a/arch/hexagon/kernel/stacktrace.c
+++ b/arch/hexagon/kernel/stacktrace.c
@@ -11,8 +11,6 @@
#include <linux/thread_info.h>
#include <linux/module.h>
-register unsigned long current_frame_pointer asm("r30");
-
struct stackframe {
unsigned long fp;
unsigned long rets;
@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
low = (unsigned long)task_stack_page(current);
high = low + THREAD_SIZE;
- fp = current_frame_pointer;
+ fp = (unsigned long)__builtin_frame_address(0);
while (fp >= low && fp <= (high - sizeof(*frame))) {
frame = (struct stackframe *)fp;
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 12242c27e2df..4023fdbea490 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -369,7 +369,7 @@ ret_from_fork:
R26.L = #LO(do_work_pending);
R0 = #VM_INT_DISABLE;
}
- if P0 jump check_work_pending
+ if (P0) jump check_work_pending
{
R0 = R25;
callr R24
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 58fd67068bac..b01d68a2d5d9 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
}
#endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index add388236f4e..ed8e28b0fb3e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -47,7 +47,7 @@ config MIPS
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
select HAVE_ASM_MODVERSIONS
- select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
+ select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
select HAVE_CONTEXT_TRACKING
select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 172801ed35b8..d859f079b771 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
# decompressor objects (linked with vmlinuz)
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index c46c59b0f1b4..49f0061a6051 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -15,7 +15,8 @@
static inline int __pure __get_cpu_type(const int cpu_type)
{
switch (cpu_type) {
-#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+ defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
case CPU_LOONGSON2EF:
#endif
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 4993db40482c..ee26f9a4575d 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -49,8 +49,26 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
register struct thread_info *__current_thread_info __asm__("$28");
+#endif
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
index b08825531e9f..0ae9b4cbc153 100644
--- a/arch/mips/include/asm/vdso/gettimeofday.h
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -26,8 +26,6 @@
#define __VDSO_USE_SYSCALL ULLONG_MAX
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
static __always_inline long gettimeofday_fallback(
struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
return error ? -ret : ret;
}
-#else
-
-static __always_inline long gettimeofday_fallback(
- struct __kernel_old_timeval *_tv,
- struct timezone *_tz)
-{
- return -1;
-}
-
-#endif
-
static __always_inline long clock_gettime_fallback(
clockid_t _clkid,
struct __kernel_timespec *_ts)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index f777e44653d5..47312c529410 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
return 0;
}
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+
+ for_each_possible_cpu(cpu1)
+ if (cpus_are_siblings(cpu, cpu1))
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+ int cluster = cpu_cluster(&cpu_data[cpu]);
+
+ for_each_possible_cpu(cpu1)
+ if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
static int __populate_cache_leaves(unsigned int cpu)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
if (c->icache.waysize) {
+ /* L1 caches are per core */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
} else {
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
}
- if (c->scache.waysize)
+ if (c->scache.waysize) {
+ /* L2 cache is per cluster */
+ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+ }
if (c->tcache.waysize)
populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 3ec69d9cbe88..561154cbcc40 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1804,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int image_size;
u8 *image_ptr;
- if (!prog->jit_requested || MIPS_ISA_REV < 2)
+ if (!prog->jit_requested)
return prog;
tmp = bpf_jit_blind_constants(prog);
diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c
index 6ebdc37c89fc..6b83b6376a4b 100644
--- a/arch/mips/vdso/vgettimeofday.c
+++ b/arch/mips/vdso/vgettimeofday.c
@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
return __cvdso_clock_gettime32(clock, ts);
}
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
int __vdso_clock_getres(clockid_t clock_id,
struct old_timespec32 *res)
{
@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
return __cvdso_clock_gettime(clock, ts);
}
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
int __vdso_clock_getres(clockid_t clock_id,
struct __kernel_timespec *res)
{
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b16237c95ea3..0c29d6cb2c8d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -62,6 +62,7 @@ config PARISC
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_COPY_THREAD_TLS
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index ecc5c2771208..230a6422b99f 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init);
* Copy architecture-specific thread state
*/
int
-copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
@@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &child_return;
- /* Setup thread TLS area from the 4th parameter in clone */
+ /* Setup thread TLS area */
if (clone_flags & CLONE_SETTLS)
- cregs->cr27 = cregs->gr[23];
+ cregs->cr27 = tls;
}
return 0;
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 1b55fc08f853..860228e917dc 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -15,6 +15,7 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
+#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 617c2777926f..f5535eae637f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
int ret;
- __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 42bbcd47cc85..dffe1a45b6ed 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
#endif
-static inline bool slice_addr_is_low(unsigned long addr)
+static inline notrace bool slice_addr_is_low(unsigned long addr)
{
u64 tmp = (u64)addr;
@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
mm_ctx_user_psize(&current->mm->context), 1);
}
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *psizes;
int index, mask_index;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d8efbaa78d67..fa7dc03459e7 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -64,6 +64,8 @@ config RISCV
select SPARSEMEM_STATIC if 32BIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select HAVE_COPY_THREAD_TLS
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 70a1891e7cd0..a2e3d54e830c 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -54,6 +54,7 @@
reg = <1>;
riscv,isa = "rv64imafdc";
tlb-split;
+ next-level-cache = <&l2cache>;
cpu1_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -77,6 +78,7 @@
reg = <2>;
riscv,isa = "rv64imafdc";
tlb-split;
+ next-level-cache = <&l2cache>;
cpu2_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -100,6 +102,7 @@
reg = <3>;
riscv,isa = "rv64imafdc";
tlb-split;
+ next-level-cache = <&l2cache>;
cpu3_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -123,6 +126,7 @@
reg = <4>;
riscv,isa = "rv64imafdc";
tlb-split;
+ next-level-cache = <&l2cache>;
cpu4_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -253,6 +257,17 @@
#pwm-cells = <3>;
status = "disabled";
};
+ l2cache: cache-controller@2010000 {
+ compatible = "sifive,fu540-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic0>;
+ interrupts = <1 2 3>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ };
};
};
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0a62d2d68455..435b65532e29 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -116,9 +116,9 @@
# define SR_PIE SR_MPIE
# define SR_PP SR_MPP
-# define IRQ_SOFT IRQ_M_SOFT
-# define IRQ_TIMER IRQ_M_TIMER
-# define IRQ_EXT IRQ_M_EXT
+# define RV_IRQ_SOFT IRQ_M_SOFT
+# define RV_IRQ_TIMER IRQ_M_TIMER
+# define RV_IRQ_EXT IRQ_M_EXT
#else /* CONFIG_RISCV_M_MODE */
# define CSR_STATUS CSR_SSTATUS
# define CSR_IE CSR_SIE
@@ -133,15 +133,15 @@
# define SR_PIE SR_SPIE
# define SR_PP SR_SPP
-# define IRQ_SOFT IRQ_S_SOFT
-# define IRQ_TIMER IRQ_S_TIMER
-# define IRQ_EXT IRQ_S_EXT
+# define RV_IRQ_SOFT IRQ_S_SOFT
+# define RV_IRQ_TIMER IRQ_S_TIMER
+# define RV_IRQ_EXT IRQ_S_EXT
#endif /* CONFIG_RISCV_M_MODE */
/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
-#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT)
-#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER)
-#define IE_EIE (_AC(0x1, UL) << IRQ_EXT)
+#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
#ifndef __ASSEMBLY__
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index b94d8db5ddcc..c40fdcdeb950 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*/
old = *parent;
- if (function_graph_enter(old, self_addr, frame_pointer, parent))
+ if (!function_graph_enter(old, self_addr, frame_pointer, parent))
*parent = return_hooker;
}
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 797802c73dee..2227db63f895 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -251,7 +251,7 @@ ENTRY(reset_regs)
#ifdef CONFIG_FPU
csrr t0, CSR_MISA
andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
- bnez t0, .Lreset_regs_done
+ beqz t0, .Lreset_regs_done
li t1, SR_FS
csrs CSR_STATUS, t1
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 3f07a91d5afb..345c4f2eba13 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
irq_enter();
switch (regs->cause & ~CAUSE_IRQ_FLAG) {
- case IRQ_TIMER:
+ case RV_IRQ_TIMER:
riscv_timer_interrupt();
break;
#ifdef CONFIG_SMP
- case IRQ_SOFT:
+ case RV_IRQ_SOFT:
/*
* We only use software interrupts to pass IPIs, so if a non-SMP
* system gets one, then we don't know what to do.
@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
riscv_software_interrupt();
break;
#endif
- case IRQ_EXT:
+ case RV_IRQ_EXT:
handle_arch_irq(regs);
break;
default:
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 95a3031e5c7c..817cf7b0974c 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
@@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
if (usp) /* User fork */
childregs->sp = usp;
if (clone_flags & CLONE_SETTLS)
- childregs->tp = childregs->a5;
+ childregs->tp = tls;
childregs->a0 = 0; /* Return value of fork() */
p->thread.ra = (unsigned long)ret_from_fork;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 69f6678db7f3..965a8cf4829c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -99,13 +99,13 @@ static void __init setup_initrd(void)
pr_info("initrd not found or empty");
goto disable;
}
- if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
pr_err("initrd extends beyond end of memory");
goto disable;
}
size = initrd_end - initrd_start;
- memblock_reserve(__pa(initrd_start), size);
+ memblock_reserve(__pa_symbol(initrd_start), size);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -124,8 +124,8 @@ void __init setup_bootmem(void)
{
struct memblock_region *reg;
phys_addr_t mem_size = 0;
- phys_addr_t vmlinux_end = __pa(&_end);
- phys_addr_t vmlinux_start = __pa(&_start);
+ phys_addr_t vmlinux_end = __pa_symbol(&_end);
+ phys_addr_t vmlinux_start = __pa_symbol(&_start);
/* Find the memory region containing the kernel */
for_each_memblock(memory, reg) {
@@ -445,7 +445,7 @@ static void __init setup_vm_final(void)
/* Setup swapper PGD for fixmap */
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
- __pa(fixmap_pgd_next),
+ __pa_symbol(fixmap_pgd_next),
PGDIR_SIZE, PAGE_TABLE);
/* Map all memory banks */
@@ -474,7 +474,7 @@ static void __init setup_vm_final(void)
clear_fixmap(FIX_PMD);
/* Move to swapper page table */
- csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
#else
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f0ce22220565..ac44bd76db4b 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index dfdbaa50946e..d1b1ff2be17a 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 2a6d04fcb3e9..6f0edd0c0220 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -14,6 +14,7 @@ config UML
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_COPY_THREAD_TLS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 81c647ef9c6c..adf91ef553ae 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request,
extern unsigned long getreg(struct task_struct *child, int regno);
extern int putreg(struct task_struct *child, int regno, unsigned long value);
-extern int arch_copy_tls(struct task_struct *new);
+extern int arch_set_tls(struct task_struct *new, unsigned long tls);
extern void clear_flushed_tls(struct task_struct *task);
extern int syscall_trace_enter(struct pt_regs *regs);
extern void syscall_trace_leave(struct pt_regs *regs);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 263a8f069133..17045e7211bf 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -153,8 +153,8 @@ void fork_handler(void)
userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct * p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct * p, unsigned long tls)
{
void (*handler)(void);
int kthread = current->flags & PF_KTHREAD;
@@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
- ret = arch_copy_tls(p);
+ ret = arch_set_tls(p, tls);
}
return ret;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 930edeb41ec3..0a74407ef92e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
}
#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dcb9bc961b39..bcfede46fe02 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
- struct zone *zone = page_zone(page);
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index 5bd949da7a4a..ac8eee093f9c 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info,
return 0;
}
-int arch_copy_tls(struct task_struct *new)
+int arch_set_tls(struct task_struct *new, unsigned long tls)
{
struct user_desc info;
int idx, ret = -EFAULT;
- if (copy_from_user(&info,
- (void __user *) UPT_SI(&new->thread.regs.regs),
- sizeof(info)))
+ if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
goto out;
ret = -EINVAL;
diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c
index 3a621e0d3925..ebd3855d9b13 100644
--- a/arch/x86/um/tls_64.c
+++ b/arch/x86/um/tls_64.c
@@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task)
{
}
-int arch_copy_tls(struct task_struct *t)
+int arch_set_tls(struct task_struct *t, unsigned long tls)
{
/*
* If CLONE_SETTLS is set, we need to save the thread id
- * (which is argument 5, child_tid, of clone) so it can be set
- * during context switches.
+ * so it can be set during context switches.
*/
- t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
+ t->thread.arch.fs = tls;
return 0;
}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 4a3fa295d8fe..296c5324dace 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -24,6 +24,7 @@ config XTENSA
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_TRACEHOOK
+ select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9e1c49134c07..3edecc41ef8c 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
- unsigned long thread_fn_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg, struct task_struct *p,
+ unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
@@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
childregs->syscall = regs->syscall;
- /* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
- childregs->threadptr = childregs->areg[5];
+ childregs->threadptr = tls;
} else {
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_kernel_thread, 1);
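
The parisc, riscv, um and xtensa hunks above all make the same switch: with HAVE_COPY_THREAD_TLS the new thread's TLS value reaches copy_thread_tls() as an explicit argument instead of being read back out of a clone-argument register (gr[23], a5, areg[5], ...). A small self-contained C sketch of that calling-convention difference; struct fake_regs is a made-up stand-in, not any architecture's pt_regs:

#include <stdio.h>

#define CLONE_SETTLS 0x00080000

/* made-up register snapshot, not a real pt_regs layout */
struct fake_regs {
	unsigned long a5;	/* where one ABI happened to stash the tls arg */
	unsigned long tp;	/* the child's thread-pointer register */
};

/* old style: guess the TLS value from the syscall-argument register */
static void copy_thread_old(unsigned long clone_flags, struct fake_regs *childregs)
{
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = childregs->a5;
}

/* new style: the core passes the TLS value explicitly */
static void copy_thread_tls_new(unsigned long clone_flags,
				struct fake_regs *childregs, unsigned long tls)
{
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = tls;
}

int main(void)
{
	struct fake_regs regs = { .a5 = 0x1000, .tp = 0 };

	copy_thread_old(CLONE_SETTLS, &regs);
	printf("old: tp = %#lx (only right if a5 still holds the tls arg)\n", regs.tp);

	copy_thread_tls_new(CLONE_SETTLS, &regs, 0x2000);
	printf("new: tp = %#lx\n", regs.tp);
	return 0;
}
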
diff --git a/block/bio.c b/block/bio.c
index a5d75f6bf4c7..94d697217887 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -539,6 +539,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
EXPORT_SYMBOL(zero_fill_bio_iter);
/**
+ * bio_truncate - truncate the bio down to @new_size
+ * @bio: the bio to be truncated
+ * @new_size: new size for truncating the bio
+ *
+ * Description:
+ * Truncate the bio to new size of @new_size. If bio_op(bio) is
+ * REQ_OP_READ, zero the truncated part. This function should only
+ * be used for handling corner cases, such as bio eod.
+ */
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned int done = 0;
+ bool truncated = false;
+
+ if (new_size >= bio->bi_iter.bi_size)
+ return;
+
+ if (bio_op(bio) != REQ_OP_READ)
+ goto exit;
+
+ bio_for_each_segment(bv, bio, iter) {
+ if (done + bv.bv_len > new_size) {
+ unsigned offset;
+
+ if (!truncated)
+ offset = new_size - done;
+ else
+ offset = 0;
+ zero_user(bv.bv_page, offset, bv.bv_len - offset);
+ truncated = true;
+ }
+ done += bv.bv_len;
+ }
+
+ exit:
+ /*
+ * Don't touch bvec table here and make it really immutable, since
+ * fs bio user has to retrieve all pages via bio_for_each_segment_all
+ * in its .end_bio() callback.
+ *
+ * It is enough to truncate bio by updating .bi_size since we can make
+ * correct bvec with the updated .bi_size for drivers.
+ */
+ bio->bi_iter.bi_size = new_size;
+}
+
+/**
* bio_put - release a reference to a bio
* @bio: bio to release reference to
*
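
bio_truncate() above walks the segments, zeroes the portion of a READ that falls past @new_size, and then only shrinks .bi_size. A rough userspace analogue of that walk, using a hypothetical struct seg array in place of the bvec table:

#include <stdio.h>
#include <string.h>

/* hypothetical segment list standing in for the bvec table */
struct seg {
	unsigned char *buf;
	unsigned int len;
};

static void truncate_read(struct seg *segs, int nsegs,
			  unsigned int *total, unsigned int new_size)
{
	unsigned int done = 0;
	int truncated = 0;
	int i;

	if (new_size >= *total)
		return;

	for (i = 0; i < nsegs; i++) {
		if (done + segs[i].len > new_size) {
			/* zero from the cut point in the first clipped segment,
			 * and the whole of every later segment */
			unsigned int offset = truncated ? 0 : new_size - done;

			memset(segs[i].buf + offset, 0, segs[i].len - offset);
			truncated = 1;
		}
		done += segs[i].len;
	}
	*total = new_size;	/* like updating only .bi_size */
}

int main(void)
{
	unsigned char a[4] = { 'A', 'A', 'A', 'A' };
	unsigned char b[4] = { 'B', 'B', 'B', 'B' };
	struct seg segs[] = { { a, 4 }, { b, 4 } };
	unsigned int total = 8;

	truncate_read(segs, 2, &total, 6);
	printf("total=%u, b = %c %c %d %d\n", total, b[0], b[1], b[2], b[3]);
	return 0;
}
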
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d783bdc4559b..347782a24a35 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
return sectors & (lbs - 1);
}
-static unsigned get_max_segment_size(const struct request_queue *q,
- unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+ struct page *start_page,
+ unsigned long offset)
{
unsigned long mask = queue_segment_boundary(q);
- /* default segment boundary mask means no boundary limit */
- if (mask == BLK_SEG_BOUNDARY_MASK)
- return queue_max_segment_size(q);
-
- return min_t(unsigned long, mask - (mask & offset) + 1,
+ offset = mask & (page_to_phys(start_page) + offset);
+ return min_t(unsigned long, mask - offset + 1,
queue_max_segment_size(q));
}
@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
unsigned seg_size = 0;
while (len && *nsegs < max_segs) {
- seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+ seg_size = get_max_segment_size(q, bv->bv_page,
+ bv->bv_offset + total_len);
seg_size = min(seg_size, len);
(*nsegs)++;
@@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
- unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+ offset), nbytes);
struct page *page = bvec->bv_page;
/*
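
The blk-merge change folds the page's physical address into the offset before applying the segment boundary mask, so a segment can never straddle queue_segment_boundary(). The arithmetic, as a tiny standalone C program with example numbers (4 MiB boundary, 64 KiB max segment); the helper name and values are illustrative only:

#include <stdio.h>

static unsigned long max_segment(unsigned long boundary_mask,
				 unsigned long max_segment_size,
				 unsigned long page_phys, unsigned long offset)
{
	/* fold the physical address in, like the fixed helper does */
	unsigned long off = boundary_mask & (page_phys + offset);
	unsigned long to_boundary = boundary_mask - off + 1;

	return to_boundary < max_segment_size ? to_boundary : max_segment_size;
}

int main(void)
{
	unsigned long mask = 0x3fffff;	/* 4 MiB boundary */
	unsigned long max = 65536;	/* 64 KiB max segment */

	/* page just below a boundary: only 4 KiB may be used */
	printf("%lu\n", max_segment(mask, max, 0x003ff000UL, 0));
	/* page well inside a window: capped by the max segment size */
	printf("%lu\n", max_segment(mask, max, 0x00100000UL, 0));
	return 0;
}
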
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b23d1e4bad33..9d0d65efcd94 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
<< MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
if (!eff) size += skip;
@@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
if (size != eff) {
dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
(vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
if (!j || j > 2*RX_DMA_BUF) {
printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
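
The eni fix replaces a bare j++ with dma[j++] = 0: the index still advances, but the skipped descriptor word is now explicitly cleared rather than left holding stale memory. A toy illustration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned int dma[4] = { 0xde, 0xad, 0xbe, 0xef };	/* stale contents */
	int i, j = 0;

	dma[j++] = 0x11;	/* word 0 written, j -> 1 */
	j++;			/* old code: word 1 skipped, keeps stale 0xad */
	dma[j++] = 0;		/* fixed code: word 2 cleared explicitly */

	for (i = 0; i < 4; i++)
		printf("dma[%d] = 0x%02x\n", i, dma[i]);
	return 0;
}
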
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index d4d88b581822..5cf49d9db95e 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -186,7 +186,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
if (zone->cond == BLK_ZONE_COND_FULL)
return BLK_STS_IOERR;
- zone->cond = BLK_ZONE_COND_CLOSED;
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
break;
case REQ_OP_ZONE_FINISH:
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index 31c374b1b91b..7ecf20a6d19c 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
unsigned int cdev = 0;
u32 mnistat, tnistat, tstatus, mcmd;
u16 tnicmd, mnicmd;
- u8 mcapndx;
u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
u32 step, rem, rem_isoch, rem_async;
int ret = 0;
@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
cur = list_entry(pos, struct agp_3_5_dev, list);
dev = cur->dev;
- mcapndx = cur->capndx;
-
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
master[cdev].maxbw = (mnistat >> 16) & 0xff;
@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
cur = master[cdev].dev;
dev = cur->dev;
- mcapndx = cur->capndx;
-
master[cdev].rq += (cdev == ndevs - 1)
? (rem_async + rem_isoch) : step;
@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
{
struct pci_dev *td = bridge->dev, *dev = NULL;
u8 mcapndx;
- u32 isoch, arqsz;
+ u32 isoch;
u32 tstatus, mstatus, ncapid;
u32 mmajor;
u16 mpstat;
@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
if (isoch == 0) /* isoch xfers not available, bail out. */
return -ENODEV;
- arqsz = (tstatus >> 13) & 0x7;
-
/*
* Allocate a head for our AGP 3.5 device list
* (multiple AGP v3 devices are allowed behind a single bridge).
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index b23b0b999232..87f449340202 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -130,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
priv->response_read = true;
ret_size = min_t(ssize_t, size, priv->response_length);
- if (!ret_size) {
+ if (ret_size <= 0) {
priv->response_length = 0;
goto out;
}
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index 1089fc0bb290..f3742bcc73e3 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -14,7 +14,7 @@ struct file_priv {
struct work_struct timeout_work;
struct work_struct async_work;
wait_queue_head_t async_wait;
- size_t response_length;
+ ssize_t response_length;
bool response_read;
bool command_enqueued;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index bb0343ffd235..27c6ca031e23 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
- goto err_start;
+ goto out_err;
}
/* Take control of the TPM's interrupt hardware and shut it off */
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- goto err_start;
+ goto out_err;
intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
@@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
rc = tpm_chip_start(chip);
if (rc)
- goto err_start;
-
+ goto out_err;
rc = tpm2_probe(chip);
+ tpm_chip_stop(chip);
if (rc)
- goto err_probe;
+ goto out_err;
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
if (rc < 0)
- goto err_probe;
+ goto out_err;
priv->manufacturer_id = vendor;
rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
if (rc < 0)
- goto err_probe;
+ goto out_err;
dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
(chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
@@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
probe = probe_itpm(chip);
if (probe < 0) {
rc = -ENODEV;
- goto err_probe;
+ goto out_err;
}
/* Figure out the capabilities */
rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
if (rc < 0)
- goto err_probe;
+ goto out_err;
dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
intfcaps);
@@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (tpm_get_timeouts(chip)) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
- goto err_probe;
+ goto out_err;
}
- chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
@@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
}
}
- tpm_chip_stop(chip);
-
rc = tpm_chip_register(chip);
if (rc)
- goto err_start;
-
- return 0;
+ goto out_err;
-err_probe:
- tpm_chip_stop(chip);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
-err_start:
+ return 0;
+out_err:
if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
chip->ops->clk_enable(chip, false);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 4e54856ce2a5..c4f15c4068c0 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
return get_cycles64();
}
-static u64 riscv_sched_clock(void)
+static u64 notrace riscv_sched_clock(void)
{
return get_cycles64();
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index f1d170dcf4d3..aba591d57c67 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -121,6 +121,8 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index fa626acdc9b9..44af435628f8 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
- .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+ JZ_SOC_DATA_BREAK_LINKS,
};
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a422a8b43cf..18c011e57592 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
SZ_2M, &descs->hw, flags);
- if (!descs->virt && (i > 0)) {
+ if (!descs->virt) {
int idx;
for (idx = 0; idx < i; idx++) {
+ descs = &ioat_chan->descs[idx];
dma_free_coherent(to_dev(ioat_chan), SZ_2M,
descs->virt, descs->hw);
descs->virt = NULL;
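
The ioat hunk makes the unwind loop re-point descs at each previously allocated entry before freeing it, instead of repeatedly freeing the slot that just failed. The general partial-allocation unwind pattern, sketched with malloc() standing in for dma_alloc_coherent():

#include <stdio.h>
#include <stdlib.h>

#define NCHUNKS 4

struct ring {
	void *chunk[NCHUNKS];
};

static int alloc_ring(struct ring *r)
{
	int i, idx;

	for (i = 0; i < NCHUNKS; i++) {
		r->chunk[i] = malloc(4096);
		if (!r->chunk[i]) {
			/* walk back over what *was* allocated, entry by entry */
			for (idx = 0; idx < i; idx++) {
				free(r->chunk[idx]);
				r->chunk[idx] = NULL;
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct ring r = { { NULL } };
	int i;

	if (alloc_ring(&r) == 0)
		printf("allocated %d chunks\n", NCHUNKS);
	for (i = 0; i < NCHUNKS; i++)
		free(r.chunk[i]);
	return 0;
}
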
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index adecea51814f..c5c1aa0dcaed 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
c = p->vchan;
if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
- vchan_cookie_complete(&p->ds_run->vd);
- p->ds_done = p->ds_run;
- p->ds_run = NULL;
+ if (p->ds_run != NULL) {
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ p->ds_run = NULL;
+ }
spin_unlock_irqrestore(&c->vc.lock, flags);
}
if (c && (tc2 & BIT(i))) {
@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
return -EAGAIN;
+ /* Avoid losing track of ds_run if a transaction is in flight */
+ if (c->phy->ds_run)
+ return -EAGAIN;
+
if (vd) {
struct k3_dma_desc_sw *ds =
container_of(vd, struct k3_dma_desc_sw, vd);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index ec4adf4260a0..256fc662c500 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- vchan_vdesc_fini(vd);
-
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+ vchan_vdesc_fini(vd);
}
}
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index 413cdb4a591d..c0cc72a3b2be 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -10,7 +10,7 @@
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
#define DRVNAME "sifive_edac"
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
index 5b7ef89eb701..ed10da5313e8 100644
--- a/drivers/firmware/broadcom/tee_bnxt_fw.c
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(fw_shm_pool)) {
- tee_client_close_context(pvt_data.ctx);
dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
err = PTR_ERR(fw_shm_pool);
goto out_sess;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 6ab25fe1c423..2ed599236a1c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1148,6 +1148,7 @@ config GPIO_MADERA
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
+ select GPIOLIB_IRQCHIP
help
GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index c4fdc192ea4e..94b8d3ae27bc 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
mutex_lock(&chip->lock);
if (test_bit(FLAG_REQUESTED, &desc->flags) &&
- !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
curr = __gpio_mockup_get(chip, offset);
if (curr == value)
goto out;
@@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
irq_type = irq_get_trigger_type(irq);
if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
irq_sim_fire(sim, offset);
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 4c3f6370eab4..05ba16fffdad 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
unsigned int bank_num;
for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+ ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
writel_relaxed(gpio->context.datalsw[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
@@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(gpio->context.dirm[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DIRM_OFFSET(bank_num));
- writel_relaxed(gpio->context.int_en[bank_num],
- gpio->base_addr +
- ZYNQ_GPIO_INTEN_OFFSET(bank_num));
writel_relaxed(gpio->context.int_type[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
@@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(gpio->context.int_any[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ writel_relaxed(~(gpio->context.int_en[bank_num]),
+ gpio->base_addr +
+ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
}
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d30e57dc755c..31fee5e918b7 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -21,11 +21,19 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
+#define QUIRK_NO_WAKEUP 0x02l
+
static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+ "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
- event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
@@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
},
{
/*
@@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+ },
+ {
+ /*
+ * Various HP X2 10 Cherry Trail models use an external
+ * embedded-controller connected via I2C + an ACPI GPIO
+ * event handler. The embedded controller generates various
+ * spurious wakeup events when suspended. So disable wakeup
+ * for its handler (it uses the only ACPI GPIO event handler).
+ * This breaks wakeup when opening the lid, the user needs
+ * to press the power-button to wakeup the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = (void *)QUIRK_NO_WAKEUP,
},
{} /* Terminating entry */
};
static int acpi_gpio_setup_params(void)
{
+ const struct dmi_system_id *id;
+ long quirks = 0;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirks = (long)id->driver_data;
+
if (run_edge_events_on_boot < 0) {
- if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}
+ if (honor_wakeup < 0) {
+ if (quirks & QUIRK_NO_WAKEUP)
+ honor_wakeup = 0;
+ else
+ honor_wakeup = 1;
+ }
+
return 0;
}
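
The gpiolib-acpi change collapses the per-quirk DMI tables into one table whose driver_data carries bit flags, then derives both module-parameter defaults from the first match. A compact userspace sketch of that flags-in-driver_data lookup, with plain product strings standing in for full DMI matches:

#include <stdio.h>
#include <string.h>

#define QUIRK_NO_EDGE_EVENTS_ON_BOOT	0x01l
#define QUIRK_NO_WAKEUP			0x02l

struct quirk_entry {
	const char *product;	/* stand-in for the DMI match data */
	long driver_data;
};

static const struct quirk_entry quirks[] = {
	{ "Z83-4",			QUIRK_NO_EDGE_EVENTS_ON_BOOT },
	{ "TERRA_PAD_1061",		QUIRK_NO_EDGE_EVENTS_ON_BOOT },
	{ "HP x2 Detachable 10-p0XX",	QUIRK_NO_WAKEUP },
	{ NULL, 0 }
};

static long first_match(const char *product)
{
	const struct quirk_entry *q;

	for (q = quirks; q->product; q++)
		if (strcmp(q->product, product) == 0)
			return q->driver_data;
	return 0;
}

int main(void)
{
	long q = first_match("HP x2 Detachable 10-p0XX");

	/* both defaults fall out of the one matched entry */
	printf("run_edge_events_on_boot = %d\n", !(q & QUIRK_NO_EDGE_EVENTS_ON_BOOT));
	printf("honor_wakeup            = %d\n", !(q & QUIRK_NO_WAKEUP));
	return 0;
}
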
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a97fb759e2f4..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
+ }
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0ffc9447b573..01a793a0cbf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
@@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
MODULE_PARM_DESC(noretry,
- "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+ "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
@@ -1359,7 +1359,8 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 44be3a45b25e..e1b8d8daeafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1488,7 +1488,7 @@ out:
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+ AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 410587b950f3..914acecda5cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -292,10 +292,10 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC2_JT,
AMDGPU_UCODE_ID_CP_MES,
AMDGPU_UCODE_ID_CP_MES_DATA,
- AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 66328ffa395a..97105a5bb246 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1052,17 +1052,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- /* Disable GFXOFF on original raven. There are combinations
- * of sbios and platforms that are not stable.
- */
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
- adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
- (adev->gfx.rlc_feature_version < 1) ||
- !adev->gfx.rlc.is_rlc_v2_1))
+ if (!(adev->rev_id >= 0x8 ||
+ adev->pdev->device == 0x15d8) &&
+ (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
+ !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7aac9568d3be..803e59d97411 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
-
- timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
- const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
{
+ enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjusting pix clock following on HDMI spec based on colour depth */
- switch (timing_out->display_color_depth) {
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
break;
@@ -3387,14 +3381,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
normalized_clk = (normalized_clk * 48) / 24;
break;
default:
- return;
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
}
- if (normalized_clk <= info->max_tmds_clock)
- return;
- reduce_mode_colour_depth(timing_out);
-
- } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
}
static void fill_stream_properties_from_drm_display_mode(
@@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode(
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- adjust_colour_depth_from_display_info(timing_out, info);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
}
static void fill_audio_info(struct audio_info *audio_info,
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 5ff7ccedfbed..a23729d3174b 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -866,6 +866,7 @@ static int smu_sw_init(void *handle)
smu->smu_baco.platform_support = false;
mutex_init(&smu->sensor_lock);
+ mutex_init(&smu->metrics_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index cc71a1078a7a..472e9fed411a 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -862,18 +862,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ac9758305ab3..41fce75b263f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -349,6 +349,7 @@ struct smu_context
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
struct mutex sensor_lock;
+ struct mutex metrics_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 4a14fd1f9fd5..ca62e92e5a4f 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -562,17 +562,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 60b9ff097142..0d3a3b0a934e 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1678,17 +1678,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
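
The arcturus/navi10/vega20 hunks all wrap the cached metrics table in the new metrics_lock so that the timestamp check, the firmware refresh and the memcpy out happen atomically. A small pthread sketch of that refresh-under-lock pattern, with userspace stand-ins rather than the SMU API:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

struct metrics_cache {
	pthread_mutex_t lock;
	double fetched_at;	/* 0 means "never fetched" */
	int table[4];
};

static double now_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* stand-in for the slow firmware round trip */
static void slow_fetch(int *out)
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = 40 + i;
}

static void get_metrics(struct metrics_cache *c, int *out)
{
	pthread_mutex_lock(&c->lock);
	if (c->fetched_at == 0 || now_sec() - c->fetched_at > 0.001) {
		slow_fetch(c->table);
		c->fetched_at = now_sec();
	}
	memcpy(out, c->table, sizeof(c->table));
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct metrics_cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int out[4];

	get_metrics(&cache, out);	/* first call refreshes */
	get_metrics(&cache, out);	/* second call hits the cache */
	printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
	return 0;
}
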
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 875a3a9eabfa..7d0e7b031e44 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
.get_modes = malidp_mw_connector_get_modes,
.mode_valid = malidp_mw_connector_mode_valid,
};
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 273dd80fabf3..644c72f9c594 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -393,7 +393,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
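
The drm_dp_mst_topology fix moves no_stop_bit from bit 5 to bit 4 of the packed sideband byte, next to the 4-bit transaction delay. A two-line check of what the packed byte looks like either way (the field values below are made up):

#include <stdio.h>

int main(void)
{
	unsigned int no_stop_bit = 1;
	unsigned int i2c_transaction_delay = 0x3;
	unsigned char before = ((no_stop_bit & 0x1) << 5) | (i2c_transaction_delay & 0xf);
	unsigned char after  = ((no_stop_bit & 0x1) << 4) | (i2c_transaction_delay & 0xf);

	printf("shift by 5: 0x%02x, shift by 4: 0x%02x\n", before, after);
	return 0;
}
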
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8ebeccdeed23..d8e8f3960f4d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1283,7 +1283,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
@@ -1309,6 +1309,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 85e6b2bbb34f..3a5ac13d5801 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index effc4250b230..301897791627 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -4515,8 +4515,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg;
- u32 trans_ddi_func_ctl2_val;
if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
return;
@@ -4524,10 +4522,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
- trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
- PORT_SYNC_MODE_MASTER_SELECT_MASK);
- I915_WRITE(reg, trans_ddi_func_ctl2_val);
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 75dd0e0367b7..68179fb56427 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -4416,9 +4416,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
+
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a47d5a7c32c9..93026217c121 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1413,14 +1413,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
int len;
u32 *cs;
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(i915))
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- /* We need to save the extended state for powersaving modes */
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
@@ -1589,22 +1581,21 @@ static int switch_context(struct i915_request *rq)
}
if (ce->state) {
- u32 hw_flags;
+ u32 flags;
GEM_BUG_ON(rq->engine->id != RCS0);
- /*
- * The kernel context(s) is treated as pure scratch and is not
- * expected to retain any state (as we sacrifice it during
- * suspend and on resume it may be corrupted). This is ok,
- * as nothing actually executes using the kernel context; it
- * is purely used for flushing user contexts.
- */
- hw_flags = 0;
- if (i915_gem_context_is_kernel(rq->gem_context))
- hw_flags = MI_RESTORE_INHIBIT;
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (!i915_gem_context_is_kernel(rq->gem_context))
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ else
+ flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, hw_flags);
+ ret = mi_set_context(rq, flags);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e29bc137e7ba..21aa08f55811 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1660,8 +1660,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4fd3d76db346..094011b8f64d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4177,7 +4177,13 @@ enum {
#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS (1 << 20)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
#define CGPSF_CLKGATE_DIS (1 << 3)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 75ae6f495161..86379eddc908 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6565,6 +6565,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaEnable32PlaneMode:icl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+ 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, PSDUNIT_CLKGATE_DIS);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f80a8ba75977..3305a94fc930 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -215,11 +215,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp;
int i, count = 0;
+ unsigned int local_index = plane - mtk_crtc->planes;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
comp = mtk_crtc->ddp_comp[i];
- if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
- *local_layer = plane->index - count;
+ if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = local_index - count;
return comp;
}
count += mtk_ddp_comp_layer_nr(comp);
@@ -310,7 +311,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
plane_state = to_mtk_plane_state(plane->state);
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
- mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
}
return 0;
@@ -386,8 +389,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
&local_layer);
- mtk_ddp_comp_layer_config(comp, local_layer,
- plane_state);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
@@ -401,7 +405,9 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_ddp_comp *comp;
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
- return mtk_ddp_comp_layer_check(comp, local_layer, state);
+ if (comp)
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+ return 0;
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index e9931bbbe846..d77c9f484ce3 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- u32 ui, cycle_time;
+ u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
- cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->da_hs_prepare;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->lpx = NS_TO_CYCLE(60, cycle_time);
- timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
- timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
- timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = 2 * timing->lpx;
-
- timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
- timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
- timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
- timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
- timing->clk_hs_exit = 2 * timing->lpx;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
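The rewritten mtk_dsi_phy_timconfig() now derives the D-PHY timing fields from the link rate in MHz with plain integer math. As a hedged illustration of what those formulas yield, the standalone program below evaluates the data-lane values for an assumed 500 MHz data rate (the rate is only an example, not taken from the patch):

#include <stdio.h>

/* Mirrors the integer formulas added above for the data-lane timings. */
int main(void)
{
	unsigned int data_rate_mhz = 500;	/* assumed example rate */
	unsigned int lpx, prepare, zero, trail;

	lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
	prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
	zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - prepare;
	trail = prepare + 1;

	/* Prints lpx=4 prepare=5 zero=7 trail=6 for 500 MHz. */
	printf("lpx=%u prepare=%u zero=%u trail=%u\n",
	       lpx, prepare, zero, trail);
	return 0;
}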
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
dsi_tmp_buf_bpp - 10);
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit + 2;
+ timing->da_hs_zero + timing->da_hs_exit + 3;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 18) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 18;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+
+ horizontal_backporch_byte =
+ horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
dsi_tmp_buf_bpp;
}
} else {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 12) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 12;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ horizontal_backporch_byte = horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index a7c4654445c7..68d4644ac2dc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
- drm_connector_cleanup(&hdmi->connector);
- drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 42651d737c55..c81cdce6ed55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 1;
+ tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
@@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
@@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
@@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
- .has_channel_0 = true,
- .needs_edp_reset = true,
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f9f1fe80b206..a62ec826ae71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 8063b1d567b1..e6e4c841fb06 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
- (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+ (usage->hid & HID_USAGE) != 0x00 &&
+ (usage->hid & HID_USAGE) != 0xff && !usage->type) {
hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
usage->hid & HID_USAGE);
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e0b241bd3070..851fe54ea59e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ /* Total size check: allow for a possible report ID byte */
+ if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ hid_err(parser->device, "report is too long\n");
+ return -1;
+ }
+
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
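The new bound in hid_add_field() compares report->size, which is accumulated in bits, against the raw buffer limit in bytes shifted into bits, with one byte held back for a numbered report's ID prefix. A minimal sketch of the same check follows; the buffer-size constant is an assumed placeholder, not the value defined in hid.h:

#include <stdbool.h>

#define EXAMPLE_MAX_BUFFER_SIZE 16384	/* assumption for illustration */

/* True when a report of report_bits bits would overflow the raw
 * transport buffer once a possible report-ID byte is added. */
static bool report_too_long(unsigned int report_bits)
{
	return report_bits > (EXAMPLE_MAX_BUFFER_SIZE - 1) << 3;
}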
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7e1689ef35f5..3a400ce603c4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -631,6 +631,7 @@
#define USB_VENDOR_ID_ITE 0x048d
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
@@ -730,6 +731,7 @@
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
#define I2C_DEVICE_ID_LG_8001 0x8001
+#define I2C_DEVICE_ID_LG_7010 0x7010
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -1102,6 +1104,7 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 63855f275a38..dea9cc65bf80 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
- if (device->driver->input_mapped && device->driver->input_mapped(device,
- hidinput, field, usage, &bit, &max) < 0)
- goto ignore;
+ if (device->driver->input_mapped &&
+ device->driver->input_mapped(device, hidinput, field, usage,
+ &bit, &max) < 0) {
+ /*
+ * The driver indicated that no further generic handling
+ * of the usage is desired.
+ */
+ return;
+ }
set_bit(usage->type, input->evbit);
@@ -1215,9 +1221,11 @@ mapped:
set_bit(MSC_SCAN, input->mscbit);
}
-ignore:
return;
+ignore:
+ usage->type = 0;
+ usage->code = 0;
}
static void hidinput_handle_scroll(struct hid_usage *usage,
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index a45f2352618d..c436e12feb23 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3cfeb1629f79..362805ddf377 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
tool = MT_TOOL_DIAL;
else if (unlikely(!confidence_state)) {
tool = MT_TOOL_PALM;
- if (!active &&
+ if (!active && mt &&
input_mt_is_active(&mt->slots[slotnum])) {
/*
* The non-confidence was reported for
@@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_LG,
HID_USB_DEVICE(USB_VENDOR_ID_LG,
USB_DEVICE_ID_LG_MELFAS_MT) },
+ { .driver_data = MT_CLS_LG,
+ HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index d1b39c29e353..0e7b2d998395 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
{ 0 }
};
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8dae0f9b819e..6286204d4c56 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev,
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver connected");
+ /* If using a wireless adaptor, ask for the connection status */
+ steam->connected = false;
steam_request_conn_status(steam);
} else {
+ /* A wired connection is always present */
+ steam->connected = true;
ret = steam_register(steam);
if (ret) {
hid_err(hdev,
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c3fc0ceb8096..7a75aff78388 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,13 +249,14 @@ out:
static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
poll_wait(file, &list->hidraw->wait, wait);
if (list->head != list->tail)
- return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!list->hidraw->exist)
- return EPOLLERR | EPOLLHUP;
- return 0;
+ mask |= EPOLLERR | EPOLLHUP;
+ return mask;
}
static int hidraw_open(struct inode *inode, struct file *file)
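With the fixed hidraw_poll() above, a reader always sees the device as writable and learns about disconnection through EPOLLERR | EPOLLHUP instead of a bare error return. A small userspace sketch of how a poll(2) loop would consume those bits (the device path is only an example):

#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Example node; the actual minor number depends on the system. */
	int fd = open("/dev/hidraw0", O_RDWR | O_NONBLOCK);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[64];

	if (fd < 0)
		return 1;

	while (poll(&pfd, 1, -1) >= 0) {
		if (pfd.revents & (POLLERR | POLLHUP))
			break;				/* device went away */
		if (pfd.revents & POLLIN)
			read(fd, buf, sizeof(buf));	/* drain one report */
	}
	close(fd);
	return 0;
}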
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index a358e61fbc82..009000c5d55c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,8 @@
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+
/* flags */
#define I2C_HID_STARTED 0
@@ -175,6 +177,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
{ 0, 0 }
};
@@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
}
if ((ret_size > size) || (ret_size < 2)) {
- dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
- __func__, size, ret_size);
- return;
+ if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+ ihid->inbuf[0] = size & 0xff;
+ ihid->inbuf[1] = size >> 8;
+ ret_size = size;
+ } else {
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ __func__, size, ret_size);
+ return;
+ }
}
i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
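The quirk branch above repairs a bogus length header instead of dropping the report: an i2c-hid input report starts with a two-byte little-endian length, so the code overwrites those bytes with the size the host expected and carries on. A minimal sketch of that byte layout, with names chosen here only for illustration:

#include <stdint.h>

/* Patch the little-endian length prefix of an i2c-hid style input
 * buffer so it matches the size the host expected to receive. */
static void fixup_input_len(uint8_t *inbuf, uint16_t expected_size)
{
	inbuf[0] = expected_size & 0xff;	/* low byte  */
	inbuf[1] = expected_size >> 8;		/* high byte */
}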
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 6c1e6110867f..1fb294ca463e 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,7 +24,9 @@
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define CML_LP_DEVICE_ID 0x02FC
+#define CMP_H_DEVICE_ID 0x06FC
#define EHL_Ax_DEVICE_ID 0x4BB3
+#define TGL_LP_DEVICE_ID 0xA0FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 784dcc8c7022..f491d8b4e24c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index fa0cc0899827..8fe3efcb8327 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -766,13 +766,14 @@ unlock:
static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
poll_wait(file, &uhid->waitq, wait);
if (uhid->head != uhid->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations uhid_fops = {
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e421cdf2d1a4..a970b809d778 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
return 0;
}
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+ struct hiddev_list *list;
+ int error;
+
+ lockdep_assert_held(&hiddev->existancelock);
+
+ list = vzalloc(sizeof(*list));
+ if (!list)
+ return -ENOMEM;
+
+ mutex_init(&list->thread_lock);
+ list->hiddev = hiddev;
+
+ if (!hiddev->open++) {
+ error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+ if (error < 0)
+ goto err_drop_count;
+
+ error = hid_hw_open(hiddev->hid);
+ if (error < 0)
+ goto err_normal_power;
+ }
+
+ spin_lock_irq(&hiddev->list_lock);
+ list_add_tail(&list->node, &hiddev->list);
+ spin_unlock_irq(&hiddev->list_lock);
+
+ file->private_data = list;
+
+ return 0;
+
+err_normal_power:
+ hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+ hiddev->open--;
+ vfree(list);
+ return error;
+}
+
/*
* open file op
*/
static int hiddev_open(struct inode *inode, struct file *file)
{
- struct hiddev_list *list;
struct usb_interface *intf;
struct hid_device *hid;
struct hiddev *hiddev;
@@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
+
hid = usb_get_intfdata(intf);
hiddev = hid->hiddev;
- if (!(list = vzalloc(sizeof(struct hiddev_list))))
- return -ENOMEM;
- mutex_init(&list->thread_lock);
- list->hiddev = hiddev;
- file->private_data = list;
-
- /*
- * no need for locking because the USB major number
- * is shared which usbcore guards against disconnect
- */
- if (list->hiddev->exist) {
- if (!list->hiddev->open++) {
- res = hid_hw_open(hiddev->hid);
- if (res < 0)
- goto bail;
- }
- } else {
- res = -ENODEV;
- goto bail;
- }
-
- spin_lock_irq(&list->hiddev->list_lock);
- list_add_tail(&list->node, &hiddev->list);
- spin_unlock_irq(&list->hiddev->list_lock);
-
mutex_lock(&hiddev->existancelock);
- /*
- * recheck exist with existance lock held to
- * avoid opening a disconnected device
- */
- if (!list->hiddev->exist) {
- res = -ENODEV;
- goto bail_unlock;
- }
- if (!list->hiddev->open++)
- if (list->hiddev->exist) {
- struct hid_device *hid = hiddev->hid;
- res = hid_hw_power(hid, PM_HINT_FULLON);
- if (res < 0)
- goto bail_unlock;
- res = hid_hw_open(hid);
- if (res < 0)
- goto bail_normal_power;
- }
- mutex_unlock(&hiddev->existancelock);
- return 0;
-bail_normal_power:
- hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+ res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
mutex_unlock(&hiddev->existancelock);
- spin_lock_irq(&list->hiddev->list_lock);
- list_del(&list->node);
- spin_unlock_irq(&list->hiddev->list_lock);
-bail:
- file->private_data = NULL;
- vfree(list);
return res;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index ccb74529bc78..d99a9d407671 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
(hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */
hdev->product == 0x392 || /* Intuos Pro 2 */
- hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+ hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */
+ hdev->product == 0x3AA)) { /* MobileStudio Pro */
value = (field->logical_maximum - value);
if (hdev->product == 0x357 || hdev->product == 0x358 ||
hdev->product == 0x392)
value = wacom_offset_rotation(input, usage, value, 3, 16);
else if (hdev->product == 0x34d || hdev->product == 0x34e ||
- hdev->product == 0x398 || hdev->product == 0x399)
+ hdev->product == 0x398 || hdev->product == 0x399 ||
+ hdev->product == 0x3AA)
value = wacom_offset_rotation(input, usage, value, 1, 2);
}
else {
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index e13af4874976..5137e6297022 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = {
static struct at91_twi_pdata sam9x60_config = {
.clk_max_div = 7,
- .clk_offset = 4,
+ .clk_offset = 3,
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index e01b2b57e724..5ab901ad615d 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -58,6 +58,7 @@ struct bcm2835_i2c_dev {
struct i2c_adapter adapter;
struct completion completion;
struct i2c_msg *curr_msg;
+ struct clk *bus_clk;
int num_msgs;
u32 msg_err;
u8 *msg_buf;
@@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
struct resource *mem, *irq;
int ret;
struct i2c_adapter *adap;
- struct clk *bus_clk;
struct clk *mclk;
u32 bus_clk_rate;
@@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
return PTR_ERR(mclk);
}
- bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+ i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
- if (IS_ERR(bus_clk)) {
+ if (IS_ERR(i2c_dev->bus_clk)) {
dev_err(&pdev->dev, "Could not register clock\n");
- return PTR_ERR(bus_clk);
+ return PTR_ERR(i2c_dev->bus_clk);
}
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
@@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
bus_clk_rate = 100000;
}
- ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
+ ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
if (ret < 0) {
dev_err(&pdev->dev, "Could not set clock frequency\n");
return ret;
}
- ret = clk_prepare_enable(bus_clk);
+ ret = clk_prepare_enable(i2c_dev->bus_clk);
if (ret) {
dev_err(&pdev->dev, "Couldn't prepare clock");
return ret;
@@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
static int bcm2835_i2c_remove(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
- clk_rate_exclusive_put(bus_clk);
- clk_disable_unprepare(bus_clk);
+ clk_rate_exclusive_put(i2c_dev->bus_clk);
+ clk_disable_unprepare(i2c_dev->bus_clk);
free_irq(i2c_dev->irq, i2c_dev);
i2c_del_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9f8dcd3f8385..35b209797d7b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
* If we can set SDA, we will always create a STOP to ensure additional
* pulses will do no harm. This is achieved by letting SDA follow SCL
* half a cycle later. Check the 'incomplete_write_byte' fault injector
- * for details.
+ * for details. Note that we must honour tsu:sto (4 us), but let's use 5 us
+ * here for simplicity.
*/
bri->set_scl(adap, scl);
- ndelay(RECOVERY_NDELAY / 2);
+ ndelay(RECOVERY_NDELAY);
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
@@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
scl = !scl;
bri->set_scl(adap, scl);
/* Creating STOP again, see above */
- ndelay(RECOVERY_NDELAY / 2);
+ if (scl) {
+ /* Honour minimum tsu:sto */
+ ndelay(RECOVERY_NDELAY);
+ } else {
+ /* Honour minimum tf and thd:dat */
+ ndelay(RECOVERY_NDELAY / 2);
+ }
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
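The timing tweak above treats the two SCL phases differently: the full delay after driving SCL high guarantees the STOP setup time tsu:sto, while the SCL-low phase only needs the shorter fall/data-hold delay before SDA is updated half a cycle behind SCL. A hedged userspace-style sketch of the resulting STOP generation; the callbacks and the 5 us figure are illustrative assumptions, not the driver's exact constants:

#include <unistd.h>	/* usleep(), userspace stand-in for ndelay() */

/* Illustrative bit-banged STOP: SDA rises while SCL is already high,
 * with at least the standard-mode tsu:sto (4 us) in between. */
static void gen_stop(void (*set_scl)(int), void (*set_sda)(int))
{
	set_sda(0);		/* start with SDA low */
	usleep(5);		/* > tf + thd:dat */
	set_scl(1);		/* release SCL */
	usleep(5);		/* >= 4 us tsu:sto, 5 us for margin */
	set_sda(1);		/* SDA rising while SCL high = STOP */
}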
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7a3b99597ead..146f98fbf22b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -181,15 +181,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
- * @udata: userspace context to pin memory for
+ * @device: IB device the umem will be associated with
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
*/
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access)
{
- struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
unsigned long lock_limit;
@@ -201,14 +200,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -226,7 +217,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
umem = kzalloc(sizeof(*umem), GFP_KERNEL);
if (!umem)
return ERR_PTR(-ENOMEM);
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->length = size;
umem->address = addr;
umem->writable = ib_access_writable(access);
@@ -281,7 +272,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
npages -= ret;
sg = ib_umem_add_sg_table(sg, page_list, ret,
- dma_get_max_seg_size(context->device->dma_device),
+ dma_get_max_seg_size(device->dma_device),
&umem->sg_nents);
up_read(&mm->mmap_sem);
@@ -289,10 +280,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg(context->device,
- umem->sg_head.sgl,
- umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ umem->nmap = ib_dma_map_sg(device,
+ umem->sg_head.sgl,
+ umem->sg_nents,
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
@@ -303,7 +294,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
goto out;
umem_release:
- __ib_umem_release(context->device, umem, 0);
+ __ib_umem_release(device, umem, 0);
vma:
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
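After this change ib_umem_get() takes the ib_device directly, so a kernel ULP acting as a proxy for a userspace application (the use case this merge describes) can pin user pages without faking a udata. A minimal sketch under that assumption; the wrapper and its arguments are placeholders, not symbols from the patch:

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/err.h>

/* Sketch: pin a user buffer on behalf of a kernel ULP. */
static struct ib_umem *ulp_pin_user_buf(struct ib_device *device,
					unsigned long addr, size_t len)
{
	struct ib_umem *umem;

	umem = ib_umem_get(device, addr, len, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return umem;		/* caller checks with IS_ERR() */

	/* ... hand umem to the driver's MR registration path ... */
	return umem;
}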
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b9baf7d0a5cb..cb3b17a7b9b0 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -110,15 +110,12 @@ out_page_list:
* They exist only to hold the per_mm reference to help the driver create
* children umems.
*
- * @udata: udata from the syscall being used to create the umem
+ * @device: IB device the implicit umem is created for
* @access: ib_reg_mr access flags
*/
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access)
{
- struct ib_ucontext *context =
- container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
int ret;
@@ -126,14 +123,11 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
- if (!context)
- return ERR_PTR(-EIO);
-
umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
if (!umem_odp)
return ERR_PTR(-ENOMEM);
umem = &umem_odp->umem;
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
umem_odp->is_implicit_odp = 1;
@@ -201,7 +195,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
/**
* ib_umem_odp_get - Create a umem_odp for a userspace va
*
- * @udata: userspace context to pin memory for
+ * @device: IB device the ODP umem is created for
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
@@ -210,23 +204,14 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
* pinning, instead, stores the mm for future page fault handling in
* conjunction with MMU notifiers.
*/
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access,
+struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
+ unsigned long addr, size_t size, int access,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct ib_ucontext *context;
struct mm_struct *mm;
int ret;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
return ERR_PTR(-EINVAL);
@@ -234,7 +219,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
if (!umem_odp)
return ERR_PTR(-ENOMEM);
- umem_odp->umem.ibdev = context->device;
+ umem_odp->umem.ibdev = device;
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
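ib_umem_odp_get() follows the same pattern: a device pointer plus the caller's mmu_interval_notifier_ops, so ODP umems can now be created from kernel context as well. A brief sketch, where my_mn_ops stands in for whatever interval-notifier ops the driver already provides:

#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include <linux/err.h>

/* Sketch: create an ODP umem for a kernel-proxied user range. */
static struct ib_umem_odp *
ulp_get_odp(struct ib_device *device, unsigned long addr, size_t len,
	    const struct mmu_interval_notifier_ops *my_mn_ops)
{
	return ib_umem_odp_get(device, addr, len,
			       IB_ACCESS_LOCAL_WRITE | IB_ACCESS_ON_DEMAND,
			       my_mn_ops);
}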
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 78b27aff2846..3ebae3b65c28 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1993,6 +1993,47 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags)
+{
+ struct ib_mr *mr;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ if (!(pd->device->attrs.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
+ access_flags, NULL);
+
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->res.type = RDMA_RESTRACK_MR;
+ rdma_restrack_kadd(&mr->res);
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_reg_user_mr);
+
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
+{
+ if (!pd->device->ops.advise_mr)
+ return -EOPNOTSUPP;
+
+ return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
+ NULL);
+}
+EXPORT_SYMBOL(ib_advise_mr);
+
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
struct ib_pd *pd = mr->pd;
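These two exported verbs are the in-kernel entry points this series adds: ib_reg_user_mr() registers a userspace range with a kernel ULP acting as proxy, and ib_advise_mr() lets that ULP prefault ODP pages before the working set runs. A hedged usage sketch follows; the advice and flag constants are the prefetch values from the uverbs ABI as I understand them, and the surrounding names are placeholders:

#include <rdma/ib_verbs.h>
#include <linux/err.h>

/* Sketch: proxy-register a user buffer and ask for ODP prefetch. */
static struct ib_mr *ulp_reg_and_prefetch(struct ib_pd *pd, u64 start, u64 len)
{
	struct ib_sge sge;
	struct ib_mr *mr;

	mr = ib_reg_user_mr(pd, start, len, start,
			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_ON_DEMAND);
	if (IS_ERR(mr))
		return mr;

	sge.addr = start;
	sge.length = (u32)len;	/* sketch assumes len fits in 32 bits */
	sge.lkey = mr->lkey;

	/* Best effort: -EOPNOTSUPP just means no advise_mr support. */
	ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
		     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);

	return mr;
}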
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 9b6ca15a183c..52b6a4d85460 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -837,7 +837,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -850,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1304,7 +1305,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2545,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto fail;
}
- cq->umem = ib_umem_get(udata, req.cq_va,
+ cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
entries * sizeof(struct cq_base),
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
@@ -3305,8 +3307,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc)
+ if (rc) {
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3512,7 +3516,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 958c1ff9c515..4d07d22bfa7b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
/* Add qp to flush list of the CQ */
bnxt_qplib_add_flush_qp(qp);
} else {
+ /* Before we complete, do WA 9060 */
+ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
- /* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
- }
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index fe3a7e8561df..962dc97a8ff2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc);
+ mhp->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index a556572058ff..4c7b7800a861 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1355,7 +1355,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
int inline_size;
int err;
- if (udata->inlen &&
+ if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
@@ -1382,7 +1382,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index adb4a1ba921b..5836fe7b2817 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
void iowait_cancel_work(struct iowait *w)
{
cancel_work_sync(&iowait_get_ib_work(w)->iowork);
- cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+ /* Only cancel the TID RDMA iowork if it has actually been set up */
+ if (iowait_get_tid_work(w)->iowork.func)
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
}
/**
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index e53f542b60af..8a2e0d9351e9 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
*/
fpsn = full_flow_psn(flow, flow->flow_state.spsn);
req->r_ack_psn = psn;
+ /*
+ * If resync_psn points to the last flow PSN for a
+ * segment and the new segment (likely from a new
+ * request) starts with a new generation number, we
+ * need to adjust resync_psn accordingly.
+ */
+ if (flow->flow_state.generation !=
+ (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+ resync_psn = mask_psn(fpsn - 1);
flow->resync_npkts +=
delta_psn(mask_psn(resync_psn + 1), fpsn);
/*
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 61f53a85767b..5ffe4c996ed3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
u32 npages;
int ret;
- *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 10af6958ab69..bff6abdccfb0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, page_addr,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 95765560c1cf..b9898e71655a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
+ mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 7c8de1e0d48a..3257ad11be48 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -763,7 +763,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_alloc_rq_inline_buf;
}
- hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+ hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 7113ebfdb4f0..c6d5f06f9cde 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
+ srq->umem =
+ ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
@@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
goto err_user_srq_mtt;
/* config index queue BA */
- srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+ srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 86375947bc67..c335de91508f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct i40iw_ucontext *ucontext;
- u64 db_addr_offset;
- u64 push_offset;
+ u64 db_addr_offset, push_offset, pfn;
ucontext = to_ucontext(context);
if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_private_data = ucontext;
} else {
if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
+ pfn = vma->vm_pgoff +
+ (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+ PAGE_SHIFT);
- return 0;
+ return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ vma->vm_page_prot, NULL);
}
/**
@@ -1758,12 +1756,15 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
int ret;
int pg_shift;
+ if (!udata)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (iwdev->closing)
return ERR_PTR(-ENODEV);
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc);
+ region = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 72eeb9a85bc5..f8b936b76dcd 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int shift;
int n;
- *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
+ *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 714f9df5bf39..d41f03ccb0e1 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index dfa17bcdcdbc..b0121c90c561 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,7 +367,7 @@ end:
return block_shift;
}
-static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
u64 length, int access_flags)
{
/*
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags);
+ return ib_umem_get(device, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
+ mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -504,7 +504,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = mlx4_get_umem_mr(udata, start, length,
+ mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d240bc92b6b..26425dd2d960 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
+ qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
+ qp->umem =
+ ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8dcf6e3d9ae2..8f9d5035142d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem =
+ ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dd8d24ee8e1d..367a71bc5f4b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -708,8 +708,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*cqe_size = ucmd.cqe_size;
cq->buf.umem =
- ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE);
+ ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1108,7 +1108,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
return -EINVAL;
- umem = ib_umem_get(udata, ucmd.buf_addr,
+ umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 7bb91d24d394..d7efc9f6daf0 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2129,7 +2129,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 12737c509aa2..61475b571531 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5d41a2c69400..01fc09f3ddd3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -815,6 +815,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
{
+ size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
@@ -828,12 +829,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
u64 max_tso;
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
- if (uhw->outlen && uhw->outlen < resp_len)
+ if (uhw_outlen && uhw_outlen < resp_len)
return -EINVAL;
resp.response_length = resp_len;
- if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
+ if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -897,7 +898,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->raw_packet_caps |=
IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
- if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
if (max_tso) {
resp.tso_caps.max_tso = 1 << max_tso;
@@ -907,7 +908,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
resp.rss_caps.rx_hash_function =
MLX5_RX_HASH_FUNC_TOEPLITZ;
resp.rss_caps.rx_hash_fields_mask =
@@ -927,9 +928,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.rss_caps);
}
} else {
- if (field_avail(typeof(resp), tso_caps, uhw->outlen))
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen))
resp.response_length += sizeof(resp.tso_caps);
- if (field_avail(typeof(resp), rss_caps, uhw->outlen))
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen))
resp.response_length += sizeof(resp.rss_caps);
}
@@ -1014,6 +1015,23 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
+ if (!uhw) {
+ /* ODP for kernel QPs is not implemented for receive
+ * WQEs and SRQ WQEs
+ */
+ props->odp_caps.per_transport_caps.rc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.uc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.ud_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.xrc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ }
}
if (MLX5_CAP_GEN(mdev, cd))
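Because mlx5_ib_query_device() is now also called with uhw == NULL for in-kernel queries, the branch above masks out the ODP capabilities (RDMA READ and SRQ receive) that the kernel-QP WQE handlers do not implement. A kernel consumer should therefore test the per-transport bits it actually needs before relying on ODP, along these lines (the helper name is illustrative):

#include <rdma/ib_verbs.h>
#include <linux/types.h>

/* Sketch: check that RC write faults are usable after the masking above. */
static bool rc_odp_write_ok(const struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		return false;

	/* Only rely on caps that survive the kernel-QP masking. */
	return !!(attrs->odp_caps.per_transport_caps.rc_odp_caps &
		  IB_ODP_SUPPORT_WRITE);
}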
@@ -1054,7 +1072,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MAX_CQ_PERIOD;
}
- if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.cqe_comp_caps);
if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
@@ -1072,7 +1090,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
@@ -1091,7 +1109,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw->outlen)) {
+ uhw_outlen)) {
if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_IB_ALLOW_MPW;
@@ -1104,7 +1122,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ if (field_avail(typeof(resp), flags, uhw_outlen)) {
resp.response_length += sizeof(resp.flags);
if (MLX5_CAP_GEN(mdev, cqe_compression_128))
@@ -1120,8 +1138,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
- if (field_avail(typeof(resp), sw_parsing_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.sw_parsing_caps);
if (MLX5_CAP_ETH(mdev, swp)) {
resp.sw_parsing_caps.sw_parsing_offloads |=
@@ -1141,7 +1158,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
@@ -1164,8 +1181,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), tunnel_offloads_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.tunnel_offloads_caps);
if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
resp.tunnel_offloads_caps |=
@@ -1186,7 +1202,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
- if (uhw->outlen) {
+ if (uhw_outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -4790,7 +4806,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
@@ -4800,7 +4815,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!dprops)
goto out;
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
+ err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
goto out;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index aa14d3c8abd9..7b019bd4de4b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1170,12 +1170,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
- void *buffer, int buflen, size_t *bc);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1fcae0735e01..6fa0a83c19de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -746,10 +746,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- u64 start, u64 length, int access_flags,
- struct ib_umem **umem, int *npages, int *page_shift,
- int *ncont, int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem, int *npages,
+ int *page_shift, int *ncont, int *order)
{
struct ib_umem *u;
@@ -758,7 +757,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags,
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
&mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
@@ -774,7 +773,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags);
+ u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1250,6 +1249,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
length == U64_MAX) {
+ if (virt_addr != start)
+ return ERR_PTR(-EINVAL);
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
@@ -1260,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
}
- err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+ err = mr_umem_get(dev, start, length, access_flags, &umem,
&npages, &page_shift, &ncont, &order);
if (err < 0)
@@ -1427,9 +1428,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = NULL;
- err = mr_umem_get(dev, udata, addr, len, access_flags,
- &mr->umem, &npages, &page_shift, &ncont,
- &order);
+ err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
if (err)
goto err;
}
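
In mr.c (and in the drivers further down) ib_umem_get() and ib_umem_odp_get() are now keyed by a struct ib_device instead of a struct ib_udata, which is what allows a kernel ULP to register user memory without faking udata. The sketch below is a simplified userspace model of that caller-side split between the ODP and pinned paths; umem_model, ACCESS_ON_DEMAND and the helper names are invented for illustration and are not the RDMA core API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; the real ib_device/ib_umem live in the RDMA core. */
struct ib_device_model { const char *name; };
struct umem_model      { uint64_t start, length; int access; int is_odp; };

#define ACCESS_ON_DEMAND 0x1

/* Model of the device-keyed umem lookup: the owning device, not the
 * caller's udata, decides which MM the pages are resolved against. */
static struct umem_model *umem_get(struct ib_device_model *dev, uint64_t start,
				   uint64_t length, int access, int odp)
{
	struct umem_model *u;

	if (!dev || !length)
		return NULL;

	u = calloc(1, sizeof(*u));
	if (!u)
		return NULL;
	u->start = start;
	u->length = length;
	u->access = access;
	u->is_odp = odp;
	return u;
}

/* Mirrors the mr_umem_get() split: ODP requests get an ODP-backed umem,
 * everything else gets a pinned one. */
static struct umem_model *mr_umem_get(struct ib_device_model *dev,
				      uint64_t start, uint64_t length,
				      int access)
{
	int odp = !!(access & ACCESS_ON_DEMAND);

	return umem_get(dev, start, length, access, odp);
}

int main(void)
{
	struct ib_device_model dev = { .name = "mlx5_0" };
	struct umem_model *u = mr_umem_get(&dev, 0x1000, 4096, ACCESS_ON_DEMAND);

	if (!u)
		return 1;
	printf("%s: umem %#llx+%llu odp=%d\n", dev.name,
	       (unsigned long long)u->start,
	       (unsigned long long)u->length, u->is_odp);
	free(u);
	return 0;
}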
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 409dffb5b0c0..4216814ba871 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -535,7 +535,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct mlx5_ib_mr *imr;
int err;
- umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
+ umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
@@ -662,11 +662,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
unsigned long current_seq;
u64 access_mask;
- u64 start_idx, page_mask;
+ u64 start_idx;
page_shift = odp->page_shift;
- page_mask = ~(BIT(page_shift) - 1);
- start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
@@ -805,11 +804,19 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ if (unlikely(io_virt < mr->mmkey.iova))
+ return -EFAULT;
+
if (!odp->is_implicit_odp) {
- if (unlikely(io_virt < ib_umem_start(odp) ||
- ib_umem_end(odp) - io_virt < bcnt))
+ u64 user_va;
+
+ if (check_add_overflow(io_virt - mr->mmkey.iova,
+ (u64)odp->umem.address, &user_va))
+ return -EFAULT;
+ if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
return -EFAULT;
- return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
flags);
}
return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
@@ -1275,15 +1282,15 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
wqe = wqe_start;
qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
if (qp && sq) {
- ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_initiator_pfault_handler(
dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
} else if (qp && !sq) {
- ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_rq(
@@ -1291,8 +1298,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
} else if (!qp) {
struct mlx5_ib_srq *srq = res_to_srq(res);
- ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_srq(
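
The pagefault_mr() hunk above converts the faulting IOVA into a virtual address inside the umem before calling pagefault_real_mr(), and uses check_add_overflow() so a crafted io_virt cannot wrap the computation. A standalone model of that translation and bounds check follows; __builtin_add_overflow() stands in for check_add_overflow(), and mr_model is an invented struct, not the mlx5 MR layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the MR geometry: the HW-visible IOVA and the user VA window. */
struct mr_model {
	uint64_t iova;        /* mmkey.iova         */
	uint64_t umem_start;  /* ib_umem_start(odp) */
	uint64_t umem_end;    /* ib_umem_end(odp)   */
};

/* Translate an IOVA range into a user VA, rejecting wraps and out-of-range
 * accesses, in the spirit of the pagefault_mr() hunk above. */
static bool iova_to_user_va(const struct mr_model *mr, uint64_t io_virt,
			    uint64_t bcnt, uint64_t *user_va)
{
	uint64_t off, va;

	if (io_virt < mr->iova)
		return false;
	off = io_virt - mr->iova;

	/* GCC/Clang builtin standing in for check_add_overflow() */
	if (__builtin_add_overflow(off, mr->umem_start, &va))
		return false;
	if (va >= mr->umem_end || mr->umem_end - va < bcnt)
		return false;

	*user_va = va;
	return true;
}

int main(void)
{
	struct mr_model mr = {
		.iova = 0x1000,
		.umem_start = 0x7f0000000000ULL,
		.umem_end = 0x7f0000001000ULL,
	};
	uint64_t va;

	if (iova_to_user_va(&mr, 0x1080, 64, &va))
		printf("user_va = %#llx\n", (unsigned long long)va);
	if (!iova_to_user_va(&mr, 0x0800, 64, &va))
		printf("rejected IOVA below the MR base\n");
	return 0;
}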
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 425efa1349b0..a4f8e7030787 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -129,14 +129,10 @@ static int is_sqp(enum ib_qp_type qp_type)
*
* Return: zero on success, or an error code.
*/
-static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
- void *buffer,
- u32 buflen,
- int wqe_index,
- int wq_offset,
- int wq_wqe_cnt,
- int wq_wqe_shift,
- int bcnt,
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+ size_t buflen, int wqe_index,
+ int wq_offset, int wq_wqe_cnt,
+ int wq_wqe_shift, int bcnt,
size_t *bytes_copied)
{
size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
@@ -160,11 +156,43 @@ static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
return 0;
}
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied = 0;
+ size_t wqe_length;
+ void *p;
+ int ds;
+
+ wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+ /* read the control segment first */
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ ctrl = p;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* read rest of WQE if it spreads over more than one stride */
+ while (bytes_copied < wqe_length) {
+ size_t copy_length =
+ min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+ if (!copy_length)
+ break;
+
+ memcpy(buffer + bytes_copied, p, copy_length);
+ bytes_copied += copy_length;
+
+ wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ }
+ *bc = bytes_copied;
+ return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -176,18 +204,10 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
int ret;
int ds;
- if (buflen < sizeof(*ctrl))
- return -EINVAL;
-
/* at first read as much as possible */
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
return ret;
@@ -210,13 +230,9 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
* so read the remaining bytes starting
* from wqe_index 0
*/
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer + bytes_copied,
- buflen - bytes_copied,
- 0,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+ buflen - bytes_copied, 0, wq->offset,
+ wq->wqe_cnt, wq->wqe_shift,
wqe_length - bytes_copied,
&bytes_copied2);
@@ -226,11 +242,24 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+
+ if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (!umem)
+ return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+ buflen, bc);
+
+ return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -238,14 +267,9 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
@@ -254,25 +278,33 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t wqe_size = 1 << wq->wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct ib_umem *umem = srq->umem;
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- 0,
- srq->msrq.max,
- srq->msrq.wqe_shift,
- buflen,
- &bytes_copied);
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+ srq->msrq.max, srq->msrq.wqe_shift,
+ buflen, &bytes_copied);
if (ret)
return ret;
@@ -280,6 +312,21 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
return 0;
}
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
@@ -749,7 +796,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0);
+ *umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +853,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
+ rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
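
The new mlx5_ib_read_kernel_wqe_sq() above reads the control segment first to learn the WQE length in data-segment units, then walks the send-queue ring stride by stride, wrapping the index with & sz_m1. Below is a self-contained sketch of that ring walk with made-up sizes; it clamps the copy to the WQE length for simplicity, whereas the driver copies whole strides, so treat it as an illustration of the wrap-around loop rather than the exact kernel logic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQE_BB       64   /* one send-queue stride (MLX5_SEND_WQE_BB)  */
#define WQE_DS_UNIT  16   /* size of one data-segment unit             */
#define SQ_STRIDES    8   /* ring size, must be a power of two         */

/* Toy ring: SQ_STRIDES strides of WQE_BB bytes each. */
static uint8_t sq_ring[SQ_STRIDES][WQE_BB];

/* Copy one WQE that may span several strides, wrapping at the end of the
 * ring, in the style of mlx5_ib_read_kernel_wqe_sq(). */
static size_t read_wqe(unsigned int wqe_index, unsigned int ds,
		       void *buffer, size_t buflen)
{
	size_t wqe_length = (size_t)ds * WQE_DS_UNIT;
	size_t copied = 0;
	unsigned int sz_m1 = SQ_STRIDES - 1;

	wqe_index &= sz_m1;
	while (copied < wqe_length) {
		size_t chunk = wqe_length - copied;

		if (chunk > WQE_BB)
			chunk = WQE_BB;
		if (chunk > buflen - copied)
			chunk = buflen - copied;
		if (!chunk)
			break;

		memcpy((uint8_t *)buffer + copied, sq_ring[wqe_index], chunk);
		copied += chunk;
		wqe_index = (wqe_index + 1) & sz_m1;   /* wrap around the ring */
	}
	return copied;
}

int main(void)
{
	uint8_t buf[4 * WQE_BB];
	/* a 6-DS WQE (96 bytes) starting one stride before the wrap point */
	size_t n = read_wqe(SQ_STRIDES - 1, 6, buf, sizeof(buf));

	printf("copied %zu bytes across the ring boundary\n", n);
	return 0;
}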
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 62939df3c692..b1a8a9175040 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 33002530fee7..ac19d57803b5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc);
+ mr->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 9bc1ca6f6f9e..d47ea675734b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -869,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 115f187f0c86..484b555150e0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -783,7 +783,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
+ q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -1426,9 +1426,8 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
if (rc)
return rc;
- srq->prod_umem =
- ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access);
+ srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -2850,7 +2849,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a26a4fd86bf4..4f6cc0de7ef9 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cq;
}
- cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
+ cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index c61e665ff261..b039f1f00e05 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags);
+ umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index f15809c28f67..9de1281f9a3b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -276,8 +276,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0);
+ qp->rumem =
+ ib_umem_get(pd->device, ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
+ qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 98c8be71d91d..d330decfb80a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
+ srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index b9a76bf74857..72f6534fbb52 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 35a2baf2f364..e83c7b518bfa 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access);
+ umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index d7dd6fcf2db0..f918fca9ada3 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client,
*/
client->tail = (client->head - 2) & (client->bufsize - 1);
- client->buffer[client->tail].input_event_sec =
- event->input_event_sec;
- client->buffer[client->tail].input_event_usec =
- event->input_event_usec;
- client->buffer[client->tail].type = EV_SYN;
- client->buffer[client->tail].code = SYN_DROPPED;
- client->buffer[client->tail].value = 0;
+ client->buffer[client->tail] = (struct input_event) {
+ .input_event_sec = event->input_event_sec,
+ .input_event_usec = event->input_event_usec,
+ .type = EV_SYN,
+ .code = SYN_DROPPED,
+ .value = 0,
+ };
client->packet_head = client->tail;
}
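
The evdev hunk swaps five field-by-field stores for one compound-literal assignment. Beyond brevity, assigning a compound literal zero-initializes every member that is not named, so stale data from the previous ring slot cannot leak into the synthesized SYN_DROPPED event. A small standalone illustration of that property (the struct is a simplified stand-in for struct input_event):

#include <stdio.h>

struct input_event_model {
	long sec;
	long usec;
	unsigned short type;
	unsigned short code;
	int value;
};

int main(void)
{
	struct input_event_model slot = {
		.sec = 111, .usec = 222, .type = 3, .code = 4, .value = 5,
	};

	/* Compound-literal assignment: unnamed members (value here) become 0
	 * instead of keeping whatever was in the slot before. */
	slot = (struct input_event_model) {
		.sec  = 10,
		.usec = 20,
		.type = 0,   /* EV_SYN      */
		.code = 3,   /* SYN_DROPPED */
	};

	printf("value after reassignment: %d\n", slot.value); /* prints 0 */
	return 0;
}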
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 55086279d044..ee6c3234df36 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev,
}
}
- __clear_bit(*old_keycode, dev->keybit);
- __set_bit(ke->keycode, dev->keybit);
-
- for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == *old_keycode) {
- __set_bit(*old_keycode, dev->keybit);
- break; /* Setting the bit twice is useless, so break */
+ if (*old_keycode <= KEY_MAX) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (i = 0; i < dev->keycodemax; i++) {
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ /* Setting the bit twice is useless, so break */
+ break;
+ }
}
}
+ __set_bit(ke->keycode, dev->keybit);
return 0;
}
@@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev,
* Simulate keyup event if keycode is not present
* in the keymap anymore
*/
- if (test_bit(EV_KEY, dev->evbit) &&
- !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(old_keycode, dev->key)) {
+ if (old_keycode > KEY_MAX) {
+ dev_warn(dev->dev.parent ?: &dev->dev,
+ "%s: got too big old keycode %#x\n",
+ __func__, old_keycode);
+ } else if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
struct input_value vals[] = {
{ EV_KEY, old_keycode, 0 },
input_value_sync
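
input_default_setkeycode() and input_set_keycode() now refuse to touch dev->keybit and dev->key for keycodes above KEY_MAX, since those bitmaps only hold KEY_MAX + 1 bits. The sketch below shows the same guard-before-bit-manipulation idea in plain C; KEY_MAX_MODEL and the bitmap sizing are illustrative, not the kernel's definitions.

#include <limits.h>
#include <stdio.h>

#define KEY_MAX_MODEL        0x2ff
#define BITS_PER_LONG_MODEL  (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS         ((KEY_MAX_MODEL / BITS_PER_LONG_MODEL) + 1)

static unsigned long keybit[BITMAP_LONGS];

static void set_keybit(unsigned int code)
{
	keybit[code / BITS_PER_LONG_MODEL] |= 1UL << (code % BITS_PER_LONG_MODEL);
}

static void clear_keybit(unsigned int code)
{
	keybit[code / BITS_PER_LONG_MODEL] &= ~(1UL << (code % BITS_PER_LONG_MODEL));
}

/* Remap old_code -> new_code, but only touch the bitmap for codes that
 * actually fit in it, mirroring the `if (*old_keycode <= KEY_MAX)` guard. */
static void remap_keycode(unsigned int old_code, unsigned int new_code)
{
	if (old_code <= KEY_MAX_MODEL)
		clear_keybit(old_code);
	else
		fprintf(stderr, "old keycode %#x out of range, not clearing\n",
			old_code);

	if (new_code <= KEY_MAX_MODEL)
		set_keybit(new_code);
}

int main(void)
{
	remap_keycode(0x1c, 0x39);      /* both in range   */
	remap_keycode(0x10000, 0x39);   /* old one too big */
	return 0;
}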
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index 53799527dc75..9f809aeb785c 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work)
return;
}
- state = (bool)msg.state;
+ /*
+ * The response data from the SCU firmware is 4 bytes,
+ * but ONLY the first byte is the key state; the other
+ * 3 bytes could be dirty data, so take ONLY the first
+ * byte as the key state.
+ */
+ state = (bool)(msg.state & 0xff);
if (state ^ priv->keystate) {
priv->keystate = state;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index fd253781be71..f2593133e524 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev,
struct uinput_device *udev = input_get_drvdata(dev);
struct timespec64 ts;
- udev->buff[udev->head].type = type;
- udev->buff[udev->head].code = code;
- udev->buff[udev->head].value = value;
ktime_get_ts64(&ts);
- udev->buff[udev->head].input_event_sec = ts.tv_sec;
- udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+ udev->buff[udev->head] = (struct input_event) {
+ .input_event_sec = ts.tv_sec,
+ .input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
+ .type = type,
+ .code = code,
+ .value = value,
+ };
+
udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
wake_up_interruptible(&udev->waitq);
@@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */
poll_wait(file, &udev->waitq, wait);
if (udev->head != udev->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return EPOLLOUT | EPOLLWRNORM;
+ return mask;
}
static int uinput_release(struct inode *inode, struct file *file)
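
The uinput_poll() fix builds the mask additively so the always-writable bits survive when the buffer also has data to read: a poll handler should report every state that currently applies, not pick one. A toy model of that mask construction (the POLL*_BIT values are arbitrary placeholders for the EPOLL* flags):

#include <stdio.h>

#define POLLIN_BIT   0x001
#define POLLOUT_BIT  0x004

struct ring_model {
	unsigned int head, tail;
};

/* Build the poll mask additively: the device is always writable, and
 * additionally readable whenever the ring is non-empty. */
static unsigned int ring_poll(const struct ring_model *r)
{
	unsigned int mask = POLLOUT_BIT;   /* always writable */

	if (r->head != r->tail)
		mask |= POLLIN_BIT;        /* data available  */

	return mask;
}

int main(void)
{
	struct ring_model empty = { 0, 0 }, full = { 3, 1 };

	printf("empty: %#x\n", ring_poll(&empty)); /* POLLOUT only     */
	printf("full:  %#x\n", ring_poll(&full));  /* POLLOUT | POLLIN */
	return 0;
}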
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c363294b3bb9..a2e96a5fd9a7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1203,7 +1203,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
@@ -1212,8 +1211,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0;
}
- cookie = domain->iova_cookie;
-
/*
* In fact the whole prepare operation should already be serialised by
* irq_domain_mutex further up the callchain, but that's pretty subtle
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 42966611a192..1801f0aaf013 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5624,8 +5624,10 @@ static int intel_iommu_add_device(struct device *dev)
group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto unlink;
+ }
iommu_group_put(group);
@@ -5651,7 +5653,8 @@ static int intel_iommu_add_device(struct device *dev)
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
"Failed to get a private domain.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unlink;
}
dev_info(dev,
@@ -5666,6 +5669,10 @@ static int intel_iommu_add_device(struct device *dev)
}
return 0;
+
+unlink:
+ iommu_device_unlink(&iommu->iommu, dev);
+ return ret;
}
static void intel_iommu_remove_device(struct device *dev)
@@ -5817,6 +5824,13 @@ static void intel_iommu_apply_resv_region(struct device *dev,
WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
}
+static struct iommu_group *intel_iommu_device_group(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_device_group(dev);
+ return generic_device_group(dev);
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
@@ -5989,7 +6003,7 @@ const struct iommu_ops intel_iommu_ops = {
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
- .device_group = pci_device_group,
+ .device_group = intel_iommu_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
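
intel_iommu_add_device() now routes its failure paths through an unlink: label so the earlier iommu_device_link() is always undone. The general shape is acquire in order, release in reverse order via goto labels; here is a self-contained sketch of that unwind pattern with invented resource names:

#include <stdio.h>
#include <stdlib.h>

struct resource_a { int dummy; };
struct resource_b { int dummy; };

static struct resource_a *acquire_a(void)      { return malloc(sizeof(struct resource_a)); }
static struct resource_b *acquire_b(int fail)  { return fail ? NULL : malloc(sizeof(struct resource_b)); }
static void release_a(struct resource_a *a)    { free(a); puts("released a"); }
static void release_b(struct resource_b *b)    { free(b); puts("released b"); }

/* Goto-based unwinding: each failure jumps to the label that releases
 * everything acquired so far, in reverse order. */
static int probe_like(int fail_b, int fail_late)
{
	struct resource_a *a;
	struct resource_b *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -1;

	b = acquire_b(fail_b);
	if (!b) {
		ret = -2;
		goto err_release_a;
	}

	if (fail_late) {
		ret = -3;
		goto err_release_b;
	}
	return 0;   /* success: resources stay held, as a probe would keep them */

err_release_b:
	release_b(b);
err_release_a:
	release_a(a);
	return ret;
}

int main(void)
{
	printf("ok:        %d\n", probe_like(0, 0));
	printf("fail b:    %d\n", probe_like(1, 0));
	printf("fail late: %d\n", probe_like(0, 1));
	return 0;
}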
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fdd40756dbc1..3ead597e1c57 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -751,6 +751,7 @@ err_put_group:
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
+ sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
kfree(device->name);
err_remove_link:
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 8df547d2d935..0aca5807a119 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -256,7 +256,7 @@ static int __init plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != IRQ_EXT)
+ if (parent.args[0] != RV_IRQ_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 9340435a94a0..6c95dc471d4c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
} else {
list_del_init(&data->list);
if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
- data->adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ data->adap->transmit_queue_sz--;
}
if (data->msg.tx_status & CEC_TX_STATUS_OK) {
@@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap)
* need to do anything special in that case.
*/
}
+ /*
+ * If something went wrong and this counter isn't what it should
+ * be, then this will reset it back to 0. Warn if it is not 0,
+ * since it indicates a bug, either in this framework or in a
+ * CEC driver.
+ */
+ if (WARN_ON(adap->transmit_queue_sz))
+ adap->transmit_queue_sz = 0;
}
/*
@@ -456,7 +465,7 @@ int cec_thread_func(void *_adap)
bool timeout = false;
u8 attempts;
- if (adap->transmitting) {
+ if (adap->transmit_in_progress) {
int err;
/*
@@ -491,7 +500,7 @@ int cec_thread_func(void *_adap)
goto unlock;
}
- if (adap->transmitting && timeout) {
+ if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
* not happen and it is an indication of a faulty CEC
@@ -500,14 +509,18 @@ int cec_thread_func(void *_adap)
* so much traffic on the bus that the adapter was
* unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
*/
- pr_warn("cec-%s: message %*ph timed out\n", adap->name,
- adap->transmitting->msg.len,
- adap->transmitting->msg.msg);
+ if (adap->transmitting) {
+ pr_warn("cec-%s: message %*ph timed out\n", adap->name,
+ adap->transmitting->msg.len,
+ adap->transmitting->msg.msg);
+ /* Just give up on this. */
+ cec_data_cancel(adap->transmitting,
+ CEC_TX_STATUS_TIMEOUT);
+ } else {
+ pr_warn("cec-%s: transmit timed out\n", adap->name);
+ }
adap->transmit_in_progress = false;
adap->tx_timeouts++;
- /* Just give up on this. */
- cec_data_cancel(adap->transmitting,
- CEC_TX_STATUS_TIMEOUT);
goto unlock;
}
@@ -522,7 +535,8 @@ int cec_thread_func(void *_adap)
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
list_del_init(&data->list);
- adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ adap->transmit_queue_sz--;
/* Make this the current transmitting message */
adap->transmitting = data;
@@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
valid_la = false;
else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
valid_la = false;
- else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
valid_la = false;
else if (cec_msg_is_broadcast(msg) &&
- adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
- !(dir_fl & BCAST2_0))
+ adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST1_4))
valid_la = false;
}
if (valid_la && min_len) {
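
cec_data_cancel() and the dequeue path now decrement transmit_queue_sz only behind a WARN_ON(!...) check, and cec_flush() resets it, so a bookkeeping bug cannot underflow the unsigned counter. A tiny illustration of the guarded-decrement idea, with the WARN modeled as an fprintf:

#include <stdio.h>

static unsigned int queue_sz;

/* Warn and skip instead of blindly decrementing: an unsigned underflow
 * would turn 0 into UINT_MAX and hide the real bug. */
static void queue_sz_dec(void)
{
	if (!queue_sz) {
		fprintf(stderr, "WARN: transmit queue size already 0\n");
		return;
	}
	queue_sz--;
}

int main(void)
{
	queue_sz = 1;
	queue_sz_dec();   /* ok, goes to 0     */
	queue_sz_dec();   /* warns, stays at 0 */
	printf("queue_sz = %u\n", queue_sz);
	return 0;
}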
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index ac88ade94cda..59609556d969 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -116,6 +116,7 @@ struct pulse8 {
unsigned int vers;
struct completion cmd_done;
struct work_struct work;
+ u8 work_result;
struct delayed_work ping_eeprom_work;
struct cec_msg rx_msg;
u8 data[DATA_SIZE];
@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
container_of(work, struct pulse8, work);
+ u8 result = pulse8->work_result;
- switch (pulse8->data[0] & 0x3f) {
+ pulse8->work_result = 0;
+ switch (result & 0x3f) {
case MSGCODE_FRAME_DATA:
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
break;
@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
pulse8->escape = false;
} else if (data == MSGEND) {
struct cec_msg *msg = &pulse8->rx_msg;
+ u8 msgcode = pulse8->buf[0];
if (debug)
dev_info(pulse8->dev, "received: %*ph\n",
pulse8->idx, pulse8->buf);
- pulse8->data[0] = pulse8->buf[0];
- switch (pulse8->buf[0] & 0x3f) {
+ switch (msgcode & 0x3f) {
case MSGCODE_FRAME_START:
msg->len = 1;
msg->msg[0] = pulse8->buf[1];
@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
if (msg->len == CEC_MAX_MSG_SIZE)
break;
msg->msg[msg->len++] = pulse8->buf[1];
- if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+ if (msgcode & MSGCODE_FRAME_EOM) {
+ WARN_ON(pulse8->work_result);
+ pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
+ break;
+ }
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_ACK:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+ WARN_ON(pulse8->work_result);
+ pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
break;
case MSGCODE_HIGH_ERROR:
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c
index edf94ee54ec7..aa9368bf7a0c 100644
--- a/drivers/mtd/nand/onenand/omap2.c
+++ b/drivers/mtd/nand/onenand/omap2.c
@@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
unsigned long timeout;
u32 syscfg;
- if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+ if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
- case FL_RESETING:
+ case FL_RESETTING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
@@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
- tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
xtra = count & 3;
@@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
@@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
c->gpmc_cs, c->phys_base, c->onenand.base,
c->dma_chan ? "DMA" : "PIO");
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ r = onenand_scan(&c->mtd, 1);
+ if (r < 0)
goto err_release_dma;
freq = omap2_onenand_get_freq(c->onenand.version_id);
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 77bd32a683e1..85640ee11c86 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
status &= 0x60;
@@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
@@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
@@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
} else {
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
@@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
ret = this->wait(mtd, FL_WRITING);
out:
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
if (!ret)
/* Recalculate device size on boundary change*/
flexonenand_get_size(mtd);
@@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
/* Reset OneNAND to read default register values */
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
/* Wait reset */
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/samsung_mtd.c
index 55e5536a5850..beb7987e4c2b 100644
--- a/drivers/mtd/nand/onenand/samsung_mtd.c
+++ b/drivers/mtd/nand/onenand/samsung_mtd.c
@@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, p, mtd->writesize);
- p = this->page_buf + offset;
+ memcpy_fromio(this->page_buf, p, mtd->writesize);
+ memcpy(buffer, this->page_buf + offset, count);
+ } else {
+ memcpy_fromio(buffer, p, count);
}
- memcpy(buffer, p, count);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 3a36285a8d8a..f6c7102a1e32 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
- char nf_mem, u32 flash_ptr, char *mem_ptr,
- char *ctrl_data_ptr, u16 ctype)
+ char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+ dma_addr_t ctrl_data_ptr, u16 ctype)
{
struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
@@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
cdma_desc->command_flags |= CDMA_CF_INT;
- cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->memory_pointer = mem_ptr;
cdma_desc->status = 0;
cdma_desc->sync_flag_pointer = 0;
cdma_desc->sync_arguments = 0;
cdma_desc->command_type = ctype;
- cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+ cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
@@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
}
cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
- (void *)dma_buf, (void *)dma_ctrl_dat,
- ctype);
+ dma_buf, dma_ctrl_dat, ctype);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
@@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page)
cadence_nand_cdma_desc_prepare(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
- page, NULL, NULL,
+ page, 0, 0,
CDMA_CT_ERASE);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
if (status) {
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 9e63800f768a..3ba73f18841f 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -37,6 +37,7 @@
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+#define FMC2_TIMEOUT_US 1000
#define FMC2_TIMEOUT_MS 1000
/* Timings */
@@ -53,6 +54,8 @@
#define FMC2_PMEM 0x88
#define FMC2_PATT 0x8c
#define FMC2_HECCR 0x94
+#define FMC2_ISR 0x184
+#define FMC2_ICR 0x188
#define FMC2_CSQCR 0x200
#define FMC2_CSQCFGR1 0x204
#define FMC2_CSQCFGR2 0x208
@@ -118,6 +121,12 @@
#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF BIT(1)
+
/* Register: FMC2_CSQCR */
#define FMC2_CSQCR_CSQSTART BIT(0)
@@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
stm32_fmc2_set_buswidth_16(fmc2, true);
}
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ u32 isr, sr;
+
+ /* Check that there are no pending requests to the NAND flash */
+ if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ FMC2_TIMEOUT_US))
+ dev_warn(fmc2->dev, "Waitrdy timeout\n");
+
+ /* Wait tWB before R/B# signal is low */
+ timings = nand_get_sdr_timings(&chip->data_interface);
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ /* R/B# signal is low, clear high level flag */
+ writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+
+ /* Wait R/B# signal is high */
+ return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+ isr, isr & FMC2_ISR_IHLF,
+ 5, 1000 * timeout_ms);
+}
+
static int stm32_fmc2_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
@@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
break;
case NAND_OP_WAITRDY_INSTR:
- ret = nand_soft_waitrdy(chip,
- instr->ctx.waitrdy.timeout_ms);
+ ret = stm32_fmc2_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
break;
}
}
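
The new stm32_fmc2_waitrdy() replaces nand_soft_waitrdy() with a register-level wait: poll until no request is pending, delay tWB, clear the latched high-level flag, then poll for R/B# high, each step bounded by a timeout. Below is a much-simplified polling-with-timeout loop in plain C; the register read is faked with a counter and the attempt budget stands in for the microsecond timeout of readl_relaxed_poll_timeout_atomic().

#include <stdbool.h>
#include <stdio.h>

/* Fake "status register": becomes ready after a few reads. */
static int reads_until_ready = 3;

static bool read_status_ready(void)
{
	if (reads_until_ready > 0) {
		reads_until_ready--;
		return false;
	}
	return true;
}

/* Bounded polling loop in the spirit of readl_relaxed_poll_timeout_atomic():
 * re-read the condition until it holds or the attempt budget runs out. */
static int poll_ready(unsigned int max_attempts)
{
	for (unsigned int i = 0; i < max_attempts; i++) {
		if (read_status_ready())
			return 0;
		/* a real driver would udelay()/cpu_relax() here */
	}
	return -1;   /* -ETIMEDOUT in kernel terms */
}

int main(void)
{
	printf("ready: %d\n", poll_ready(10));  /* succeeds  */
	reads_until_ready = 100;
	printf("ready: %d\n", poll_ready(10));  /* times out */
	return 0;
}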
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4744bf94ad9a..b9f272408c4d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl,
/* FTL can contain -1 entries that are by default filled with bits */
if (block == -1) {
- memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ if (buffer)
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
return 0;
}
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index f4afe123e9dc..aeb3ad2dbfb8 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -4596,6 +4596,7 @@ static void sst_set_default_init(struct spi_nor *nor)
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
nor->params.quad_enable = NULL;
nor->params.set_4byte = st_micron_set_4byte;
}
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 4e1789ea2bc3..eacd428e07e9 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -102,6 +102,7 @@
#define TCAN4X5X_MODE_NORMAL BIT(7)
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
#define TCAN4X5X_SW_RESET BIT(2)
@@ -166,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
}
}
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+ int ret = 0;
+
+ if (priv->reset_gpio) {
+ gpiod_set_value(priv->reset_gpio, 1);
+
+ /* tpulse_width minimum 30us */
+ usleep_range(30, 100);
+ gpiod_set_value(priv->reset_gpio, 0);
+ } else {
+ ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
+ usleep_range(700, 1000);
+
+ return ret;
+}
+
static int regmap_spi_gather_write(void *context, const void *reg,
size_t reg_len, const void *val,
size_t val_len)
@@ -348,14 +371,23 @@ static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
}
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_DISABLE_INH_MSK, 0x01);
+}
+
static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
GPIOD_OUT_HIGH);
if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
return -EPROBE_DEFER;
tcan4x5x_disable_wake(cdev);
@@ -366,18 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
if (IS_ERR(tcan4x5x->reset_gpio))
tcan4x5x->reset_gpio = NULL;
- usleep_range(700, 1000);
+ ret = tcan4x5x_reset(tcan4x5x);
+ if (ret)
+ return ret;
tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
"device-state",
GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio))
+ if (IS_ERR(tcan4x5x->device_state_gpio)) {
tcan4x5x->device_state_gpio = NULL;
-
- tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
- "vsup");
- if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x_disable_state(cdev);
+ }
return 0;
}
@@ -412,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
if (!priv)
return -ENOMEM;
+ priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else
+ priv->power = NULL;
+
mcan_class->device_data = priv;
m_can_class_get_clocks(mcan_class);
@@ -451,11 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
- ret = tcan4x5x_parse_config(mcan_class);
+ ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
goto out_clk;
- tcan4x5x_power_enable(priv->power, 1);
+ ret = tcan4x5x_parse_config(mcan_class);
+ if (ret)
+ goto out_power;
+
+ ret = tcan4x5x_init(mcan_class);
+ if (ret)
+ goto out_power;
ret = m_can_class_register(mcan_class);
if (ret)
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 8caf7af0dee2..99101d7027a8 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- int npackets = 0;
- int ret = 1;
+ int work_done = 0;
struct sk_buff *skb;
struct can_frame *frame;
u8 canrflg;
- while (npackets < quota) {
+ while (work_done < quota) {
canrflg = in_8(&regs->canrflg);
if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
break;
@@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
- npackets++;
+ work_done++;
netif_receive_skb(skb);
}
- if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
- napi_complete(&priv->napi);
- clear_bit(F_RX_PROGRESS, &priv->flags);
- if (priv->can.state < CAN_STATE_BUS_OFF)
- out_8(&regs->canrier, priv->shadow_canrier);
- ret = 0;
+ if (work_done < quota) {
+ if (likely(napi_complete_done(&priv->napi, work_done))) {
+ clear_bit(F_RX_PROGRESS, &priv->flags);
+ if (priv->can.state < CAN_STATE_BUS_OFF)
+ out_8(&regs->canrier, priv->shadow_canrier);
+ }
}
- return ret;
+ return work_done;
}
static irqreturn_t mscan_isr(int irq, void *dev_id)
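
mscan_rx_poll() is moved to the standard NAPI contract: count packets in work_done, return it, and call napi_complete_done() only when work_done < quota, re-enabling interrupts only if that call succeeds. The following is a minimal userspace model of that budget loop; napi_complete_done() and the interrupt re-enable are stubbed out and the frame source is faked.

#include <stdbool.h>
#include <stdio.h>

static int pending_frames = 10;

static bool rx_frame_available(void) { return pending_frames > 0; }
static void rx_consume_frame(void)   { pending_frames--; }

static bool napi_complete_done_model(int work_done)
{
	printf("napi_complete_done(%d)\n", work_done);
	return true;                 /* pretend no one rescheduled us */
}

static void reenable_rx_interrupts(void) { puts("RX interrupts re-enabled"); }

/* NAPI-style poll: never exceed the budget, and only complete + re-arm
 * interrupts when the queue drained inside the budget. */
static int rx_poll(int budget)
{
	int work_done = 0;

	while (work_done < budget && rx_frame_available()) {
		rx_consume_frame();
		work_done++;
	}

	if (work_done < budget) {
		if (napi_complete_done_model(work_done))
			reenable_rx_interrupts();
	}
	return work_done;
}

int main(void)
{
	printf("poll 1 -> %d\n", rx_poll(8));   /* budget exhausted, keeps polling */
	printf("poll 2 -> %d\n", rx_poll(8));   /* drains, completes NAPI          */
	return 0;
}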
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 2f74f6704c12..a4b4b742c80c 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
hconf,
sizeof(*hconf),
1000);
@@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
dconf,
sizeof(*dconf),
1000);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 5fc0be564274..7ab87a758754 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
struct usb_endpoint_descriptor *ep;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
ep = &iface_desc->endpoint[i].desc;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index ae4c37e1bb75..1b9957f12459 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
struct usb_endpoint_descriptor *endpoint;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index f3f0c3f07391..1962c8330daa 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
if (rule->fs.flow_type != fs->flow_type ||
rule->fs.ring_cookie != fs->ring_cookie ||
- rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ rule->fs.h_ext.data[0] != fs->h_ext.data[0])
continue;
switch (fs->flow_type & ~FLOW_EXT) {
@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 120a65d3e3ef..b016cc205f81 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+ /* Use the default high priority for management frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index bc5a6b2bb1e4..5324c6f4ae90 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -211,6 +211,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 7fe256c5739d..0b43c650e100 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode)
+ phy_interface_t mode, bool force)
{
u8 lane;
u16 cmode;
@@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
- /* cmode doesn't change, nothing to do for us */
- if (cmode == chip->ports[port].cmode)
+ /* cmode doesn't change, nothing to do for us unless forced */
+ if (cmode == chip->ports[port].cmode && !force)
return 0;
lane = mv88e6xxx_serdes_get_lane(chip, port);
@@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 9 && port != 10)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
break;
}
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
@@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, true);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index a51ac088c0bc..1da5ac111499 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
if (enabled) {
/* Enable VLAN filtering. */
- tpid = ETH_P_8021AD;
- tpid2 = ETH_P_8021Q;
+ tpid = ETH_P_8021Q;
+ tpid2 = ETH_P_8021AD;
} else {
/* Disable VLAN filtering. */
tpid = ETH_P_SJA1105;
@@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
- /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
- general_params->tpid = tpid;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
/* When VLAN filtering is on, we need to at least be able to
* decode management traffic through the "backup plan".
@@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
if (!clone)
goto out;
- sja1105_ptp_txtstamp_skb(ds, slot, clone);
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
out:
mutex_unlock(&priv->mgmt_lock);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 54258a25031d..43ab7589d0d0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -234,7 +234,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
if (rw == SPI_WRITE)
priv->info->ptp_cmd_packing(buf, cmd, PACK);
- rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
SJA1105_SIZE_PTP_CMD);
if (rw == SPI_READ)
@@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
ptp_data->clock = NULL;
}
-void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
@@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
goto out;
}
- rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+ rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
if (rc < 0) {
dev_err(ds->dev, "timed out polling for tstamp\n");
kfree_skb(skb);
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 0d03e13e9909..63d2311817c4 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
static size_t
sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
- sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
- sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 26b925b5dace..fa6750d973d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (admin->cycle_time_extension)
return -ENOTSUPP;
- if (!ns_to_sja1105_delta(admin->base_time)) {
- dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
- return -ERANGE;
- }
-
for (i = 0; i < admin->num_entries; i++) {
s64 delta_ns = admin->entries[i].interval;
s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index a17a4da7bc15..c85e3e29012c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ aq_nic_set_loopback(self);
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
- aq_nic_set_loopback(self);
-
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 58e891af6e09..ec041f78d063 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
- .hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_loopback = hw_atl_b0_set_loopback,
.hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8910b62e67ed..f547baa6c954 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
u32 speed;
mpi_state = hw_atl_utils_mpi_get_state(self);
- speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G |
- FW2X_RATE_10G);
+ speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
if (!speed) {
link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 035dbb1b2c98..ec25fd81985d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
int ethaddr_bytes = ETH_ALEN;
memset(ppattern + offset, 0xff, magicsync);
- for (j = 0; j < magicsync; j++)
- set_bit(len++, (unsigned long *) pmask);
+ for (j = 0; j < magicsync; j++) {
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
+ }
for (j = 0; j < B44_MAX_PATTERNS; j++) {
if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
@@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- set_bit(len++, (unsigned long *) pmask);
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
}
}
return len - 1;
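
The b44 hunk above replaces set_bit() on the wake-up pattern mask with open-coded byte indexing. set_bit() expects an unsigned long bitmap, so pointing it at a u8 mask buffer risks unaligned, word-sized and endian-dependent stores; addressing the byte directly avoids that. A minimal sketch of the byte-oriented helper, with an invented name and a plain u8 buffer rather than the driver's structures:

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: set bit 'nr' in a byte-addressed mask, matching
 * the pmask[len >> 3] |= BIT(len & 7) form used in the hunk above.
 */
static inline void pattern_mask_set_bit(u8 *mask, unsigned int nr)
{
	mask[nr >> 3] |= BIT(nr & 7);	/* byte nr / 8, bit nr % 8 */
}
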
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 7a6e82db4231..bacc8552bce1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+#define BNX2X_VFS_VLAN_CREDIT(bp) \
+ (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
#define PF_VLAN_CREDIT_E2(bp, func_num) \
- ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+ ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c5ee363ca5dc..a0503b99dc79 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4088,7 +4088,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->rate = 0;
mgmt->hw.init = &init;
- *tx_clk = clk_register(NULL, &mgmt->hw);
+ *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
if (IS_ERR(*tx_clk))
return PTR_ERR(*tx_clk);
@@ -4416,7 +4416,6 @@ err_out_free_netdev:
err_disable_clocks:
clk_disable_unprepare(tx_clk);
- clk_unregister(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
@@ -4446,7 +4445,6 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
clk_disable_unprepare(bp->tx_clk);
- clk_unregister(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a70ac2097892..becee29f5df7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -504,6 +504,7 @@ struct link_config {
enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
+ enum cc_pause advertised_fc; /* actual advertised flow control */
enum cc_fec requested_fec; /* Forward Error Correction: */
enum cc_fec fec; /* requested and actual in use */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 20ab3b6285a2..c837382ee522 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev,
struct port_info *p = netdev_priv(dev);
epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
- epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
- epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+ epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 19d18acfc9a6..844fdcf55118 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (cc_pause & PAUSE_TX)
fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
else
- fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
+ FW_PORT_CAP32_802_3_PAUSE;
} else if (cc_pause & PAUSE_TX) {
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
}
@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
const struct fw_port_cmd *cmd = (const void *)rpl;
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
- struct adapter *adapter = pi->adapter;
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
- int link_ok, linkdnrc;
- enum fw_port_type port_type;
+ struct adapter *adapter = pi->adapter;
+ unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
- unsigned int speed, fc, fec;
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+ int action, link_ok, linkdnrc;
+ enum fw_port_type port_type;
/* Extract the various fields from the Port Information message.
*/
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
fec = fwcap_to_cc_fec(acaps);
+ adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc || fec != lc->fec) { /* something changed */
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
+ fec != lc->fec) {
+ /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,
@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
lc->link_ok = link_ok;
lc->speed = speed;
+ lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f6fc0875d5b0..f4d41f968afa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
struct port_info *pi = netdev_priv(dev);
pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
- pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
- pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+ pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
+ pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index ccca67cf4487..57cfd10a99ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -135,6 +135,7 @@ struct link_config {
enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
+ enum cc_pause advertised_fc; /* actual advertised flow control */
enum cc_fec auto_fec; /* Forward Error Correction: */
enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 8a389d617a23..9d49ff211cc1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
static void t4vf_handle_get_port_info(struct port_info *pi,
const struct fw_port_cmd *cmd)
{
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
- struct adapter *adapter = pi->adapter;
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
- int link_ok, linkdnrc;
- enum fw_port_type port_type;
+ struct adapter *adapter = pi->adapter;
+ unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
- unsigned int speed, fc, fec;
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+ int action, link_ok, linkdnrc;
+ enum fw_port_type port_type;
/* Extract the various fields from the Port Information message. */
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
fec = fwcap_to_cc_fec(acaps);
+ adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc || fec != lc->fec) { /* something changed */
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
+ fec != lc->fec) {
+ /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,
@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
lc->link_ok = link_ok;
lc->speed = speed;
+ lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6a9d12dad5d9..a301f0095223 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
int page_offset;
unsigned int sz;
int *count_ptr;
- int i;
+ int i, j;
vaddr = phys_to_virt(addr);
WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
SMP_CACHE_BYTES));
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
/* We may use multiple Rx pools */
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (!dpaa_bp)
goto free_buffers;
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_page(priv->rx_dma_dev, sg_addr,
- DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
skb_add_rx_frag(skb, i - 1, head_page, frag_off,
frag_len, dpaa_bp->size);
}
+
/* Update the pool count for the current {cpu x bpool} */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
(*count_ptr)--;
if (qm_sg_entry_is_final(&sgt[i]))
@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
return skb;
free_buffers:
- /* compensate sw bpool counter changes */
- for (i--; i >= 0; i--) {
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)++;
- }
- }
/* free all the SG entries */
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
- sg_addr = qm_sg_addr(&sgt[i]);
+ for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
sg_vaddr = phys_to_virt(sg_addr);
+ /* all pages 0..i were unmapped */
+ if (j > i)
+ dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
free_pages((unsigned long)sg_vaddr, 0);
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)--;
+ /* counters 0..i-1 were decremented */
+ if (j >= i) {
+ dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
}
- if (qm_sg_entry_is_final(&sgt[i]))
+ if (qm_sg_entry_is_final(&sgt[j]))
break;
}
/* free the SGT fragment */
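
The reworked dpaa error path above unwinds only the work the forward loop actually completed, keyed off the index where it stopped. A hedged, generic sketch of that shape follows; it is a toy allocate-and-count loop with invented names, not the dpaa code, and it assumes that failure at index i means entries 0..i-1 hold resources while later entries were never touched:

#include <linux/errno.h>
#include <linux/slab.h>

static int process_entries(void **bufs, int n, int *count)
{
	int i, j;

	for (i = 0; i < n; i++) {
		bufs[i] = kmalloc(64, GFP_KERNEL);	/* stand-in for "map" */
		if (!bufs[i])
			goto unwind;
		(*count)++;				/* stand-in for pool accounting */
	}
	return 0;

unwind:
	for (j = 0; j < i; j++) {	/* only entries 0..i-1 were processed */
		kfree(bufs[j]);
		(*count)--;
	}
	return -ENOMEM;
}
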
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 05c1899f6628..9294027e9d90 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return;
regs->version = fec_enet_register_version;
@@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev,
off >>= 2;
buf[off] = readl(&theregs[off]);
}
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
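
The fec hunk above brackets the ethtool register dump with runtime-PM calls so the block is guaranteed to be clocked while registers are read. A minimal sketch of that bracket, assuming a generic device and MMIO base; the helper name and the put_noidle balancing on the failure path are illustrative and not taken from the patch:

#include <linux/io.h>
#include <linux/pm_runtime.h>

static int read_one_reg(struct device *dev, void __iomem *base, u32 *out)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume the device if suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* keep the usage count balanced */
		return ret;
	}

	*out = readl(base);			/* clocks are on at this point */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
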
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index edec61dfc868..9f52e72ff641 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->cnt = cnt;
rx->fill_cnt += work_done;
- /* restock desc ring slots */
- dma_wmb(); /* Ensure descs are visible before ringing doorbell */
gve_rx_write_doorbell(priv, rx);
return gve_rx_work_pending(rx);
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index f4889431f9b7..d0244feb0301 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
* may have added descriptors without ringing the doorbell.
*/
- /* Ensure tx descs from a prior gve_tx are visible before
- * ringing doorbell.
- */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
@@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
return NETDEV_TX_OK;
- /* Ensure tx descs are visible before ringing doorbell */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 68d593074f6c..d48292ccda29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -122,6 +122,22 @@ enum {
#endif
};
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS 3
+#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
+ MLX5E_INNER_TTC_GROUP2_SIZE +\
+ MLX5E_INNER_TTC_GROUP3_SIZE)
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 1d6b58860da6..3a975641f902 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- if (!reporter) {
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, err_str);
+
+ if (!reporter)
return err_ctx->recover(&err_ctx->ctx);
- }
+
return devlink_health_report(reporter, err_str, err_ctx);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 15b7f0f1427c..73d3dc07331f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -904,22 +904,6 @@ del_rules:
return err;
}
-#define MLX5E_TTC_NUM_GROUPS 3
-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
- MLX5E_TTC_GROUP2_SIZE +\
- MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS 3
-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
- MLX5E_INNER_TTC_GROUP2_SIZE +\
- MLX5E_INNER_TTC_GROUP3_SIZE)
-
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
bool use_ipv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9b32a9c0f497..024e1cddfd0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
- ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
@@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info
return kmemdup(tun_info, tun_size, GFP_KERNEL);
}
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < out_index; i++) {
+ if (flow->encaps[i].e != e)
+ continue;
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+ netdev_err(priv->netdev, "can't duplicate encap action\n");
+ return true;
+ }
+
+ return false;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
/* must verify if encap is valid or not */
if (e) {
+ /* Check that entry was not already attached to this flow */
+ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
mutex_unlock(&esw->offloads.encap_tbl_lock);
wait_for_completion(&e->res_ready);
@@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_hw_devs(priv, netdev_priv(out_dev));
}
+static bool is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ int err, i, if_count = 0;
bool encap = false;
u32 action = 0;
- int err, i;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
@@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
+ if (is_duplicated_output_device(priv->netdev,
+ out_dev,
+ ifindexes,
+ if_count,
+ extack))
+ return -EOPNOTSUPP;
+
+ ifindexes[if_count] = out_dev->ifindex;
+ if_count++;
+
rcu_read_lock();
uplink_upper =
netdev_master_upper_dev_get_rcu(uplink_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9a48c4310887..8c5df6c7d7b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node)
}
}
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
- struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
- struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
- kmem_cache_free(steering->ftes_cache, fte);
-}
-
static void del_sw_fte(struct fs_node *node)
{
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
- call_rcu(&fte->rcu, del_sw_fte_rcu);
+ kmem_cache_free(steering->ftes_cache, fte);
}
static void del_hw_flow_group(struct fs_node *node)
@@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
}
static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+ const u32 *match_value,
+ bool take_write)
{
struct fs_fte *fte_tmp;
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
- if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- fte_tmp = NULL;
- goto out;
- }
-
- if (!fte_tmp->node.active) {
- tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
- }
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
- up_write_ref_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
- struct fs_fte *fte_tmp;
-
- if (!tree_get_node(&g->node))
- return NULL;
-
- rcu_read_lock();
- fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+ rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- rcu_read_unlock();
fte_tmp = NULL;
goto out;
}
- rcu_read_unlock();
-
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
@@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
out:
- tree_put_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
- if (write)
- return lookup_fte_for_write_locked(g, match_value);
+ if (take_write)
+ up_write_ref_node(&g->node, false);
else
- return lookup_fte_for_read_locked(g, match_value);
+ up_read_ref_node(&g->node);
+ return fte_tmp;
}
static struct mlx5_flow_handle *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e8cd997f413e..c2621b911563 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -203,7 +203,6 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
- struct rcu_head rcu;
int modify_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 173e2c12e1c7..cf7b8da0f010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
if (err)
goto err_load;
+ if (boot) {
+ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ if (err)
+ goto err_devlink_reg;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1210,6 +1216,9 @@ out:
return err;
err_reg_dev:
+ if (boot)
+ mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
mlx5_unload(dev);
err_load:
if (boot)
@@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
request_module_nowait(MLX5_IB_MOD);
- err = mlx5_devlink_register(devlink, &pdev->dev);
- if (err)
- goto clean_load;
-
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
return 0;
-clean_load:
- mlx5_unload_one(dev, true);
-
err_load_one:
mlx5_pci_close(dev);
pci_init_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 32e94d2ee5e4..e4cff7abb348 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
/* We need to copy the refcount since this ste
* may have been traversed several times
*/
- refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+ new_ste->refcount = cur_ste->refcount;
/* Link old STEs rule_mem list to the new ste */
mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
if (!rule_mem)
return -ENOMEM;
+ INIT_LIST_HEAD(&rule_mem->list);
+ INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
rule_mem->ste = ste;
list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index a5a266983dd3..c6c7d1defbd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
- refcount_set(&dst->refcount, refcount_read(&src->refcount));
+ dst->refcount = src->refcount;
INIT_LIST_HEAD(&dst->rule_list);
list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
- return !refcount_read(&ste->refcount);
+ return !ste->refcount;
}
/* Init one ste as a pattern for ste data array */
@@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->ste_arr = chunk->ste_arr;
htbl->hw_ste_arr = chunk->hw_ste_arr;
htbl->miss_list = chunk->miss_list;
- refcount_set(&htbl->refcount, 0);
+ htbl->refcount = 0;
for (i = 0; i < chunk->num_of_entries; i++) {
struct mlx5dr_ste *ste = &htbl->ste_arr[i];
ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
- refcount_set(&ste->refcount, 0);
+ ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
INIT_LIST_HEAD(&ste->rule_list);
@@ -713,7 +713,7 @@ out_free_htbl:
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_read(&htbl->refcount))
+ if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 290fe61c33d0..3fdf4a5eb031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste {
u8 *hw_ste;
/* refcount: indicates the num of rules that using this ste */
- refcount_t refcount;
+ u32 refcount;
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
struct mlx5dr_ste_htbl {
u8 lu_type;
u16 byte_mask;
- refcount_t refcount;
+ u32 refcount;
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
@@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_dec_and_test(&htbl->refcount))
+ htbl->refcount--;
+ if (!htbl->refcount)
mlx5dr_ste_htbl_free(htbl);
}
static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
- refcount_inc(&htbl->refcount);
+ htbl->refcount++;
}
/* STE utils */
@@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- if (refcount_dec_and_test(&ste->refcount))
+ ste->refcount--;
+ if (!ste->refcount)
mlx5dr_ste_free(ste, matcher, nic_matcher);
}
/* initial as 0, increased only when ste appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
- refcount_inc(&ste->refcount);
+ ste->refcount++;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
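
The dr_ste/dr_types hunks above drop refcount_t in favour of a plain u32. That is only safe when every get and put on the counter is serialized by the same external lock, which is what the change relies on for these software-steering objects. A sketch of that pattern with the locking assumption made explicit (structure and function names are invented):

#include <linux/mutex.h>
#include <linux/slab.h>

struct tracked_obj {
	u32 refcount;			/* protected by owner->lock */
};

struct owner {
	struct mutex lock;
};

static void obj_get(struct owner *o, struct tracked_obj *obj)
{
	lockdep_assert_held(&o->lock);
	obj->refcount++;
}

static void obj_put(struct owner *o, struct tracked_obj *obj)
{
	lockdep_assert_held(&o->lock);
	if (!--obj->refcount)
		kfree(obj);		/* last reference dropped under the lock */
}
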
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 544344ac4894..79057af4fe99 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
+#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"
@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
- comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;
@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
- kfree(comp_data);
+ vfree(comp_data);
return ERR_PTR(err);
}
@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;
comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
- kfree(comp_data);
+ vfree(comp_data);
}
void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5294a1622643..af30e8a76682 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5472,6 +5472,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 556dca328bb5..f7fd5e8fbf96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4542,8 +4542,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -4626,6 +4626,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
rate = 19 * 1024;
burst_size = 12;
break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
+ rate = 360;
+ burst_size = 7;
+ break;
default:
continue;
}
@@ -4665,6 +4669,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
priority = 5;
tc = 5;
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 68cc6737d45c..46d43cfd04e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -651,6 +651,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
return 0;
+ if (!p->child_handle) {
+ /* This is an invisible FIFO replacing the original Qdisc.
+ * Ignore it--the original Qdisc's destroy will follow.
+ */
+ return 0;
+ }
+
/* See if the grafted qdisc is already offloaded on any tclass. If so,
* unoffload it.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 08b7e9f964da..8290e82240fc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -7079,6 +7079,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
rif = mlxsw_sp->router->rifs[i];
+ if (rif && rif->ops &&
+ rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
+ continue;
if (rif && rif->dev && rif->dev != dev &&
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
mlxsw_sp->mac_mask)) {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index c56fcbb37066..52ed111d98f4 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt);
-MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index bd6c01004913..0e2fa14f1423 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
struct meson8b_dwmac_clk_configs *clk_configs;
+ static const struct clk_div_table div_table[] = {
+ { .div = 2, .val = 2, },
+ { .div = 3, .val = 3, },
+ { .div = 4, .val = 4, },
+ { .div = 5, .val = 5, },
+ { .div = 6, .val = 6, },
+ { .div = 7, .val = 7, },
+ };
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
if (!clk_configs)
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
- clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO |
- CLK_DIVIDER_ROUND_CLOSEST;
+ clk_configs->m250_div.table = div_table;
+ clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
&clk_divider_ops,
&clk_configs->m250_div.hw);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 1c8d84ed8410..01b484cb177e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 26353ef616b8..7d40760e9ba8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f51a265459d..80d59b775907 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -4256,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4270,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev)
/* Entry to report the DMA HW features */
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
+
+ register_netdevice_notifier(&stmmac_notifier);
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cc8d7e7bf9ac..4775f49d7f3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -320,7 +320,7 @@ out:
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
struct device_node *np, struct device *dev)
{
- bool mdio = false;
+ bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index e5b7d6d2286e..f6222ada6818 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -540,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
@@ -813,7 +813,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
- goto out_sock;
+ goto out_rel_sock;
}
sk = sock->sk;
@@ -826,8 +826,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-out_sock:
+out_rel_sock:
release_sock(sock->sk);
+out_sock:
sockfd_put(sock);
return sk;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05631d97eeb4..747c0542a53c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -259,7 +259,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
- const struct ethhdr *eth = eth_hdr(skb);
+ const struct ethhdr *eth = skb_eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 3b29d381116f..975789d9349d 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
.read_status = aqr_read_status,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 1585eebb73fe..ee7a718662c6 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -566,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl,
struct sfp_bus *bus;
int ret;
+ if (!fwnode)
+ return 0;
+
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
ret = PTR_ERR(bus);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f940dc6485e5..fb4781080d6d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2724,11 +2724,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static int lan78xx_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
-
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@@ -2740,8 +2735,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
- if (lan78xx_linearize(skb) < 0)
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
return NULL;
+ }
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3ec6b506033d..1c5159dcc720 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index e2e679a01b65..77ccf3672ede 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
spin_lock_irqsave(&sdla_lock, flags);
SDLA_WINDOW(dev, addr);
- pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+ pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
SDLA_WINDOW(dev, addr);
pbuf->opp_flag = 1;
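
The sdla change above drops an (int) cast from the window address calculation. dev->mem_start is an unsigned long, and narrowing it through int truncates the address on 64-bit builds; keeping the arithmetic in unsigned long and converting to a pointer only at the end avoids that. A small illustrative helper (name and parameters invented):

#include <linux/types.h>

static inline void *window_ptr(unsigned long base, unsigned long addr,
			       unsigned long mask)
{
	/* buggy form: (void *)(((int)base) + (addr & mask)) */
	return (void *)(base + (addr & mask));
}
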
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 667f18f465be..5dc32b72e7fa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 56c21b501185..72a7e41f3018 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_HOST_ID:
+ return sizeof(req->sq->ctrl->hostid);
+ default:
+ return 0;
+ }
+}
+
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
return le64_to_cpu(cmd->get_log_page.lpo);
@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
return;
switch (cdw10 & 0xff) {
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index ead06c6c2601..12e71a315a2c 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -115,7 +115,7 @@ struct cpcap_usb_ints_state {
enum cpcap_gpio_mode {
CPCAP_DM_DP,
CPCAP_MDM_RX_TX,
- CPCAP_UNKNOWN,
+ CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
CPCAP_OTG_DM_DP,
};
@@ -134,6 +134,8 @@ struct cpcap_phy_ddata {
struct iio_channel *id;
struct regulator *vusb;
atomic_t active;
+ unsigned int vbus_provider:1;
+ unsigned int docked:1;
};
static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
@@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+ enum musb_vbus_id_status status)
+{
+ int error;
+
+ error = musb_mailbox(status);
+ if (!error)
+ return;
+
+ dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+ __func__, error);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_phy_ddata *ddata;
@@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
- if (s.id_ground) {
- dev_dbg(ddata->dev, "id ground, USB host mode\n");
+ vbus = cpcap_usb_vbus_valid(ddata);
+
+ /* We need to kick the VBUS as USB A-host */
+ if (s.id_ground && ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+ CPCAP_BIT_VBUSSTBY_EN |
+ CPCAP_BIT_VBUSEN_SPI,
+ CPCAP_BIT_VBUSEN_SPI);
+ if (error)
+ goto out_err;
+
+ return;
+ }
+
+ if (vbus && s.id_ground && ddata->docked) {
+ dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ return;
+ }
+
+ /* No VBUS needed with docks */
+ if (vbus && s.id_ground && !ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "connected to a dock\n");
+
+ ddata->docked = true;
+
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ /*
+ * Force check state again after musb has reoriented,
+ * otherwise devices won't enumerate after loading PHY
+ * driver.
+ */
+ schedule_delayed_work(&ddata->detect_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
+
+ if (s.id_ground && !ddata->docked) {
+ dev_dbg(ddata->dev, "id ground, USB host mode\n");
+
+ ddata->vbus_provider = true;
+
+ error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI,
@@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work)
vbus = cpcap_usb_vbus_valid(ddata);
+ /* Otherwise assume we're connected to a USB host */
if (vbus) {
- /* Are we connected to a docking station with vbus? */
- if (s.id_ground) {
- dev_dbg(ddata->dev, "connected to a dock\n");
-
- /* No VBUS needed with docks */
- error = cpcap_usb_set_usb_mode(ddata);
- if (error)
- goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
- if (error)
- goto out_err;
-
- return;
- }
-
- /* Otherwise assume we're connected to a USB host */
dev_dbg(ddata->dev, "connected to USB host\n");
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_VALID);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
return;
}
+ ddata->vbus_provider = false;
+ ddata->docked = false;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
/* Default to debug UART mode */
error = cpcap_usb_set_uart_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- goto out_err;
-
dev_dbg(ddata->dev, "set UART mode\n");
return;
@@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
goto out_err;
@@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
+ /* Enable UART mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
@@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
return error;
@@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
- CPCAP_BIT_USBXCVREN,
- CPCAP_BIT_USBXCVREN);
- if (error)
- goto out_err;
-
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_PU_SPI |
CPCAP_BIT_DMPD_SPI |
@@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
+ /* Enable USB mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
@@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
if (error)
dev_err(ddata->dev, "could not set UART mode\n");
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- dev_err(ddata->dev, "could not set mailbox\n");
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index ee184d5607bd..f20524f0c21d 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work)
struct phy_mdm6600 *ddata;
struct device *dev;
DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
- int error, i, val = 0;
+ int error;
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
@@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work)
if (error)
return;
- for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
- val |= test_bit(i, values) << i;
- dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
- __func__, i, test_bit(i, values), val);
- }
- ddata->status = values[0];
+ ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
dev_info(dev, "modem status: %i %s\n",
ddata->status,
- phy_mdm6600_status_name[ddata->status & 7]);
+ phy_mdm6600_status_name[ddata->status]);
complete(&ddata->ack);
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 091e20303a14..66f91726b8b2 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -66,7 +66,7 @@
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 2b97fb1185a0..9ca20c947283 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate && !cfg->fracdiv)
break;
@@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate)
break;
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index f1806fd781a0..530426a74f75 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -2,6 +2,7 @@
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
depends on MFD_LOCHNAGAR
+ select GPIOLIB
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 3c80828a5e50..bbc919bef2bf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
return ret;
meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
+ bit = bit << 1;
ret = regmap_read(pc->reg_ds, reg, &val);
if (ret)
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index f4d0a86c00d0..5e77b0dc5fd6 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES
config CPU_HWMON
tristate "Loongson-3 CPU HWMon Driver"
- depends on CONFIG_MACH_LOONGSON64
+ depends on MACH_LOONGSON64
select HWMON
default y
help
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index a67701ed93e8..2e5b6a6834da 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1295,6 +1295,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
+ if (!rapl_defaults)
+ return ERR_PTR(-ENODEV);
+
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e60eab7f8a61..61fafe0374ce 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -166,9 +166,9 @@ static struct posix_clock_operations ptp_clock_ops = {
.read = ptp_read,
};
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
{
- struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
@@ -213,7 +213,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
ptp->clock.ops = ptp_clock_ops;
- ptp->clock.release = delete_ptp_clock;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
@@ -236,15 +235,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (err)
goto no_pin_groups;
- /* Create a new device in our class. */
- ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
- ptp, ptp->pin_attr_groups,
- "ptp%d", ptp->index);
- if (IS_ERR(ptp->dev)) {
- err = PTR_ERR(ptp->dev);
- goto no_device;
- }
-
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@@ -260,8 +250,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
}
- /* Create a posix clock. */
- err = posix_clock_register(&ptp->clock, ptp->devid);
+ /* Initialize a new device of our class in our clock structure. */
+ device_initialize(&ptp->dev);
+ ptp->dev.devt = ptp->devid;
+ ptp->dev.class = ptp_class;
+ ptp->dev.parent = parent;
+ ptp->dev.groups = ptp->pin_attr_groups;
+ ptp->dev.release = ptp_clock_release;
+ dev_set_drvdata(&ptp->dev, ptp);
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+ /* Create a posix clock and link it to the device. */
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
pr_err("failed to create posix clock\n");
goto no_clock;
@@ -273,8 +273,6 @@ no_clock:
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
- device_destroy(ptp_class, ptp->devid);
-no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
if (ptp->kworker)
@@ -304,7 +302,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- device_destroy(ptp_class, ptp->devid);
ptp_cleanup_pin_groups(ptp);
posix_clock_unregister(&ptp->clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 9171d42468fd..6b97155148f1 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
struct ptp_clock {
struct posix_clock clock;
- struct device *dev;
+ struct device dev;
struct ptp_clock_info *info;
dev_t devid;
int index; /* index into clocks.map */
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 989506bd90b1..16f0c8570036 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
int i;
for (i = 0; i < rate_count; i++) {
- if (ramp <= slew_rates[i])
- cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
- else
+ if (ramp > slew_rates[i])
break;
+
+ if (id == AXP20X_DCDC2)
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+ else
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
}
if (cfg == 0xff) {
@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
- AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index ec764022621f..5bf8a2dc5fe7 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd70528_set_ramp_delay,
};
static const struct regulator_ops bd70528_led_ops = {
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index df2829dd55ad..2ecd8752b088 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 == 0x17) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
- RTC_FREQ_SELECT);
- save_freq_select &= ~RTC_DIV_RESET2;
- } else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
- RTC_FREQ_SELECT);
-#else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 5249fc99fd5f..9135e2101752 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
irqen = irqsta & ~RTC_IRQ_EN_AL;
mutex_lock(&rtc->lock);
if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
- irqen) < 0)
+ irqen) == 0)
mtk_rtc_write_trigger(rtc);
mutex_unlock(&rtc->lock);
@@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
mutex_unlock(&rtc->lock);
- tm->tm_sec = data[RTC_OFFSET_SEC];
- tm->tm_min = data[RTC_OFFSET_MIN];
- tm->tm_hour = data[RTC_OFFSET_HOUR];
- tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
- tm->tm_year = data[RTC_OFFSET_YEAR];
+ tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+ tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+ tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+ tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+ tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
tm->tm_year += RTC_MIN_YEAR_OFFSET;
tm->tm_mon--;
@@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
- data[RTC_OFFSET_SEC] = tm->tm_sec;
- data[RTC_OFFSET_MIN] = tm->tm_min;
- data[RTC_OFFSET_HOUR] = tm->tm_hour;
- data[RTC_OFFSET_DOM] = tm->tm_mday;
- data[RTC_OFFSET_MTH] = tm->tm_mon;
- data[RTC_OFFSET_YEAR] = tm->tm_year;
-
mutex_lock(&rtc->lock);
+ ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+ data, RTC_OFFSET_COUNT);
+ if (ret < 0)
+ goto exit;
+
+ data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+ (tm->tm_sec & RTC_AL_SEC_MASK));
+ data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+ (tm->tm_min & RTC_AL_MIN_MASK));
+ data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+ (tm->tm_hour & RTC_AL_HOU_MASK));
+ data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+ (tm->tm_mday & RTC_AL_DOM_MASK));
+ data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+ (tm->tm_mon & RTC_AL_MTH_MASK));
+ data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+ (tm->tm_year & RTC_AL_YEA_MASK));
+
if (alm->enabled) {
ret = regmap_bulk_write(rtc->regmap,
rtc->addr_base + RTC_AL_SEC,
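The set_alarm path above now reads the alarm registers back first and only replaces the time fields under their masks, so the bits outside each field survive the write. The underlying read-modify-write step, shown with an invented register value and mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reg = 0xA35A;	/* invented current register content */
	uint16_t mask = 0x003F;	/* invented seconds-field mask */
	uint16_t sec = 42;

	reg = (reg & ~mask) | (sec & mask);

	/* bits outside the mask (0xA340) are kept, the low 6 bits become 0x2A */
	printf("reg = 0x%04X\n", reg);
	return 0;
}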
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8dcd20b34dde..852f5f3b3592 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
sun50i_h6_rtc_clk_init);
+/*
+ * The R40 user manual is self-conflicting on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+ .rc_osc_rate = 16000000,
+ .fixed_prescaler = 512,
+};
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+ sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+ sun8i_r40_rtc_clk_init);
+
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bc4158888af9..29facb913671 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2482,50 +2482,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
rc = qeth_cm_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_cm_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_ulp_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "4err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_ulp_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_alloc_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_qdio_establish(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
qeth_free_qdio_queues(card);
- goto out_qdio;
+ return rc;
}
rc = qeth_qdio_activate(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "7err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_dm_act(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
- goto out_qdio;
+ return rc;
}
return 0;
-out_qdio:
- qeth_qdio_clear_card(card, !IS_IQD(card));
- qdio_free(CARD_DDEV(card));
- return rc;
}
void qeth_print_status_message(struct qeth_card *card)
@@ -3429,11 +3425,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- if (card->state != CARD_STATE_DOWN) {
- rc = -1;
- goto out;
- }
-
qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
@@ -5035,10 +5026,8 @@ retriable:
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
- if (rc < 0) {
+ if (rc)
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
- goto out;
- }
}
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8c95e6019bac..47d37e75dda6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -287,12 +287,12 @@ static void qeth_l2_stop_card(struct qeth_card *card)
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
- qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
+ qeth_qdio_clear_card(card, 0);
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->info.promisc_mode = 0;
@@ -1952,8 +1952,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
/* check if VNICC is currently enabled */
bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
- /* if everything is turned off, VNICC is not active */
- if (!card->options.vnicc.cur_chars)
+ if (!card->options.vnicc.sup_chars)
return false;
/* default values are only OK if rx_bcast was not enabled by user
* or the card is offline.
@@ -2040,8 +2039,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
/* enforce assumed default values and recover settings, if changed */
error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
timeout);
- chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
- chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+ /* Change chars, if necessary */
+ chars_tmp = card->options.vnicc.wanted_chars ^
+ card->options.vnicc.cur_chars;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 04e301de376f..5508ab89b518 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1307,12 +1307,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
- qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
+ qeth_qdio_clear_card(card, 0);
flush_workqueue(card->event_wq);
card->info.promisc_mode = 0;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index f9067ed6c7d3..e8c848f72c6d 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
char *tmp;
- int rc;
if (!IS_IQD(card))
return -EPERM;
- if (card->state != CARD_STATE_DOWN)
- return -EPERM;
- if (card->options.sniffer)
- return -EPERM;
- if (card->options.cq == QETH_CQ_NOTAVAILABLE)
- return -EPERM;
+
+ mutex_lock(&card->conf_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (card->options.sniffer) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **)&buf, "\n");
- if (strlen(tmp) > 8)
- return -EINVAL;
+ if (strlen(tmp) > 8) {
+ rc = -EINVAL;
+ goto out;
+ }
if (card->options.hsuid[0])
/* delete old ip address */
@@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
card->options.hsuid[0] = '\0';
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
qeth_configure_cq(card, QETH_CQ_DISABLED);
- return count;
+ goto out;
}
- if (qeth_configure_cq(card, QETH_CQ_ENABLED))
- return -EPERM;
+ if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
+ rc = -EPERM;
+ goto out;
+ }
snprintf(card->options.hsuid, sizeof(card->options.hsuid),
"%-8s", tmp);
@@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
rc = qeth_l3_modify_hsuid(card, true);
+out:
+ mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
index a9ffff3277c7..a5069394cd61 100644
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -9,7 +9,7 @@
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
#define SIFIVE_L2_DIRECCFIX_LOW 0x100
#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 76d6b94a7597..5a25da377119 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -172,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws)
static void dw_writer(struct dw_spi *dws)
{
- u32 max = tx_max(dws);
+ u32 max;
u16 txw = 0;
+ spin_lock(&dws->buf_lock);
+ max = tx_max(dws);
while (max--) {
/* Set the tx word if the transfer's original "tx" is not null */
if (dws->tx_end - dws->len) {
@@ -186,13 +188,16 @@ static void dw_writer(struct dw_spi *dws)
dw_write_io_reg(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void dw_reader(struct dw_spi *dws)
{
- u32 max = rx_max(dws);
+ u32 max;
u16 rxw;
+ spin_lock(&dws->buf_lock);
+ max = rx_max(dws);
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
@@ -204,6 +209,7 @@ static void dw_reader(struct dw_spi *dws)
}
dws->rx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -276,18 +282,20 @@ static int dw_spi_transfer_one(struct spi_controller *master,
{
struct dw_spi *dws = spi_controller_get_devdata(master);
struct chip_data *chip = spi_get_ctldata(spi);
+ unsigned long flags;
u8 imask = 0;
u16 txlevel = 0;
u32 cr0;
int ret;
dws->dma_mapped = 0;
-
+ spin_lock_irqsave(&dws->buf_lock, flags);
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
dws->len = transfer->len;
+ spin_unlock_irqrestore(&dws->buf_lock, flags);
spi_enable_chip(dws, 0);
@@ -471,6 +479,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+ spin_lock_init(&dws->buf_lock);
spi_controller_set_devdata(master, dws);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 38c7de1f0aa9..1bf5713e047d 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -119,6 +119,7 @@ struct dw_spi {
size_t len;
void *tx;
void *tx_end;
+ spinlock_t buf_lock;
void *rx;
void *rx_end;
int dma_mapped;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 442cff71a0d2..8428b69c858b 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -185,6 +185,7 @@ struct fsl_dspi {
struct spi_transfer *cur_transfer;
struct spi_message *cur_msg;
struct chip_data *cur_chip;
+ size_t progress;
size_t len;
const void *tx;
void *rx;
@@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
- /* Write two TX FIFO entries first, and then the corresponding
- * CMD FIFO entry.
+ /* Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries.
*/
u32 data = dspi_pop_tx(dspi);
- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
- /* LSB */
- tx_fifo_write(dspi, data & 0xFFFF);
- tx_fifo_write(dspi, data >> 16);
- } else {
- /* MSB */
- tx_fifo_write(dspi, data >> 16);
- tx_fifo_write(dspi, data & 0xFFFF);
- }
cmd_fifo_write(dspi);
+ tx_fifo_write(dspi, data & 0xFFFF);
+ tx_fifo_write(dspi, data >> 16);
} else {
/* Write one entry to both TX FIFO and CMD FIFO
* simultaneously.
@@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
u32 spi_tcr;
spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
- dspi->tx - dspi->bytes_per_word, !dspi->irq);
+ dspi->progress, !dspi->irq);
/* Get transfer counter (in number of SPI transfers). It was
* reset to 0 when transfer(s) were started.
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
/* Update total number of bytes that were transferred */
msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+ dspi->progress += spi_tcnt;
trans_mode = dspi->devtype_data->trans_mode;
if (trans_mode == DSPI_EOQ_MODE)
@@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
return 0;
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
if (trans_mode == DSPI_EOQ_MODE)
dspi_eoq_write(dspi);
@@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
+ dspi->progress = 0;
/* Validated transfer specific frame size (defaults applied) */
dspi->bits_per_word = transfer->bits_per_word;
if (transfer->bits_per_word <= 8)
@@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_CTARE_DTCP(1));
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 47cde1864630..ce9b30112e26 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
}
}
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+ unsigned int threshold)
{
- unsigned int fifo_threshold, fill_bytes;
u32 val;
- fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
- bytes_per_word(priv->bits_per_word));
- fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
- fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
- /* set fifo threshold */
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int fifo_threshold, fill_words;
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+ uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+ fill_words = fifo_threshold -
+ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
- while (fill_bytes--)
+ while (fill_words--)
uniphier_spi_send(priv);
}
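The rework above programs the FIFO thresholds separately and counts the fill in words rather than bytes: fill_words is the RX threshold minus the words still outstanding between rx_bytes and tx_bytes. A worked example with assumed sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int fifo_depth = 8;	/* assumed FIFO depth */
	unsigned int bpw = 2;		/* bytes per word for 16-bit frames */
	unsigned int rx_bytes = 10, tx_bytes = 6;

	unsigned int threshold = DIV_ROUND_UP(rx_bytes, bpw);

	if (threshold > fifo_depth)
		threshold = fifo_depth;

	unsigned int fill_words = threshold -
			DIV_ROUND_UP(rx_bytes - tx_bytes, bpw);

	/* threshold is 5 words, 2 words are still pending, so fill 3 more */
	printf("threshold=%u fill_words=%u\n", threshold, fill_words);
	return 0;
}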
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e4c4532f7f3..8994545367a2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work)
* advances its @tx buffer pointer monotonically.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- * preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will disable IRQs and preemption for the duration of the
* transfer, for less jitter in time measurement. Only compatible
* with PIO drivers. If true, must follow up with
@@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work)
*/
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_pre)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_pre = progress;
xfer->timestamped_pre = true;
@@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
* timestamped.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- * just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
*/
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_post)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_post)
return;
ptp_read_system_postts(xfer->ptp_sts);
@@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
}
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_post = progress;
xfer->timestamped_post = true;
}
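With the documentation change above, callers of the timestamping helpers pass a transferred-word count instead of a tx pointer, and the old pointer arithmetic moves into the caller as a plain counter. A small equivalence sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned char tx_buf[32];
	unsigned int bytes_per_word = 2;	/* assumed 16-bit words */
	unsigned char *tx = tx_buf + 10;	/* 10 bytes already queued */

	size_t progress = (size_t)(tx - tx_buf) / bytes_per_word;

	/* the old pointer math and the new explicit counter agree: 5 words */
	printf("progress = %zu words\n", progress);
	return 0;
}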
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index dbff0f7e7cf5..ddc0dc93d08b 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -46,8 +46,8 @@
#define PCI171X_RANGE_UNI BIT(4)
#define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0)
#define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0)
#define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
#define PCI171X_STATUS_REG 0x06 /* R: status register */
#define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 08eaa0bad0de..1c9c3ba4d518 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
__u16 reserved1;
__u32 bayer_sign;
__u8 bayer_nf;
- __u8 reserved2[3];
+ __u8 reserved2[7];
} __attribute__((aligned(32))) __packed;
/**
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index a7cac0719b8b..b5d42f411dd8 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 8d19ae71e7cc..4e651b698617 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
memcpy(array, addr, length);
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_BBREG, length, array);
if (ret)
goto end;
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 56cd77fd9ea0..7958fc165462 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -719,7 +719,7 @@ end:
*/
int vnt_radio_power_on(struct vnt_private *priv)
{
- int ret = true;
+ int ret = 0;
vnt_exit_deep_sleep(priv);
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 6074ceda78bf..50e1c8918040 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -259,6 +259,7 @@ struct vnt_private {
u8 mac_hw;
/* netdev */
struct usb_device *usb;
+ struct usb_interface *intf;
u64 tsf_time;
u8 rx_rate;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 4ac85ecb0921..9cb924c54571 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = {
int vnt_init(struct vnt_private *priv)
{
- if (!(vnt_init_registers(priv)))
+ if (vnt_init_registers(priv))
return -EAGAIN;
SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr);
@@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
priv = hw->priv;
priv->hw = hw;
priv->usb = udev;
+ priv->intf = intf;
vnt_set_options(priv);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index d3304df6bd53..d977d4777e4f 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
@@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
reg_off, reg, sizeof(u8), &data);
}
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 length, u8 *data)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < length; i += block) {
+ u16 len = min_t(int, length - i, block);
+
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE,
+ i, reg, len, data + i);
+ if (ret)
+ goto end;
+ }
+end:
+ return ret;
+}
+
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
@@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
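vnt_control_out_blocks() above splits a long register write into block-sized control transfers, with the final chunk possibly shorter than the block size. The offset/length arithmetic on its own, with invented totals:

#include <stdio.h>

int main(void)
{
	int length = 150, block = 64;	/* invented total and block size */

	for (int i = 0; i < length; i += block) {
		int len = length - i < block ? length - i : block;

		/* emits chunks (0,64), (64,64), (128,22) */
		printf("offset=%d len=%d\n", i, len);
	}
	return 0;
}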
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 95147ec7b96a..b65d9c01a211 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -18,6 +18,8 @@
#include "device.h"
+#define VNT_REG_BLOCK_SIZE 64
+
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer);
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
@@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 len, u8 *data);
+
int vnt_start_interrupt_urb(struct vnt_private *priv);
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
int vnt_tx_context(struct vnt_private *priv,
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 3eb2f11a5de1..2c5250ca2801 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work)
if (vnt_init(priv)) {
/* If fail all ends TODO retry */
dev_err(&priv->usb->dev, "failed to start\n");
+ usb_set_intfdata(priv->intf, NULL);
ieee80211_free_hw(priv->hw);
return;
}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 015e7d201598..0e7cf5236932 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv)
irq = platform_get_irq_byname(pdev, "uplow");
if (irq < 0) {
ret = irq;
+ /* For old DTs with no IRQ defined */
+ if (irq == -ENXIO)
+ ret = 0;
goto err_put_device;
}
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 226adeec2aed..ce5309d00280 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -663,6 +663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
return AE_OK;
}
+static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
+ { "INT3511", 0 },
+ { "INT3512", 0 },
+ { },
+};
+
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
if (acpi_device_enumerated(adev))
return AE_OK;
+ /* Skip if black listed */
+ if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
+ return AE_OK;
+
if (acpi_serdev_check_resources(ctrl, adev))
return AE_OK;
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 5023c85ebc6e..044c3cbdcfa4 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -89,8 +89,7 @@ void tty_port_link_device(struct tty_port *port,
{
if (WARN_ON(index >= driver->num))
return;
- if (!driver->ports[index])
- driver->ports[index] = port;
+ driver->ports[index] = port;
}
EXPORT_SYMBOL_GPL(tty_port_link_device);
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 4c1e75509303..02f6ca2cb1ba 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
*/
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
u32 reg;
- priv_dev = cdns->gadget_dev;
-
/* check USB device interrupt */
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
@@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
*/
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
int bit;
u32 reg;
- priv_dev = cdns->gadget_dev;
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
@@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
priv_dev = cdns->gadget_dev;
- devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
@@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
cdns3_device_irq_handler,
cdns3_device_thread_irq_handler,
- IRQF_SHARED, dev_name(cdns->dev), cdns);
+ IRQF_SHARED, dev_name(cdns->dev),
+ cdns->gadget_dev);
if (ret)
goto err0;
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index b45ceb91c735..48e4a5ca1835 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
struct ehci_ci_priv {
struct regulator *reg_vbus;
+ bool enabled;
};
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
@@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
- if (priv->reg_vbus) {
+ if (priv->reg_vbus && enable != priv->enabled) {
if (port > 1) {
dev_warn(dev,
"Not support multi-port regulator control\n");
@@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
enable ? "enable" : "disable", ret);
return ret;
}
+ priv->enabled = enable;
}
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5f40117e68e7..26bc05e48d8a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_INT] = 1024,
};
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
- int asnum, struct usb_host_interface *ifp, int num_ep,
- unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+ struct usb_endpoint_descriptor *e2)
+{
+ if (e1->bEndpointAddress == e2->bEndpointAddress)
+ return true;
+
+ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+ int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+ struct usb_endpoint_descriptor *epd;
+ struct usb_interface_cache *intfc;
+ struct usb_host_interface *alt;
+ int i, j, k;
+
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intfc = config->intf_cache[i];
+
+ for (j = 0; j < intfc->num_altsetting; ++j) {
+ alt = &intfc->altsetting[j];
+
+ if (alt->desc.bInterfaceNumber == inum &&
+ alt->desc.bAlternateSetting != asnum)
+ continue;
+
+ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+ epd = &alt->endpoint[k].desc;
+
+ if (endpoint_is_duplicate(epd, d))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_config *config, int inum, int asnum,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
- if (ifp->endpoint[i].desc.bEndpointAddress ==
- d->bEndpointAddress) {
- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
- }
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
- /* Validate the wMaxPacketSize field */
+ /*
+ * Validate the wMaxPacketSize field.
+ * Some devices have isochronous endpoints in altsetting 0;
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
maxp = usb_endpoint_maxp(&endpoint->desc);
- if (maxp == 0) {
- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Find the highest legal maxpacket size for this endpoint */
@@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
- num_ep, buffer, size);
+ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+ alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
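The new helpers above treat two endpoint descriptors as duplicates when they share an address, or when either is a control endpoint and they share an endpoint number, since control endpoints are bidirectional and the direction bit alone cannot tell them apart. A minimal sketch of that predicate outside the descriptor-parsing code, with an invented endpoint type:

#include <stdbool.h>
#include <stdio.h>

struct ep { unsigned char addr; bool is_control; };

static bool is_duplicate(struct ep a, struct ep b)
{
	if (a.addr == b.addr)
		return true;

	/* compare endpoint numbers (low 4 bits) when either is control */
	if (a.is_control || b.is_control)
		return (a.addr & 0x0f) == (b.addr & 0x0f);

	return false;
}

int main(void)
{
	struct ep bulk_in = { 0x81, false }, ctrl_out = { 0x01, true };

	/* same endpoint number 1 and one side is control: duplicate */
	printf("%d\n", is_duplicate(bulk_in, ctrl_out));
	return 0;
}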
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f229ad6952c0..8c4e5adbf820 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2692,7 +2692,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme)
+#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme))
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c960a97ea02..154f3f3e8cff 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
+ /*
+ * For OUT direction, host may send less than the setup
+ * length. Return true for all OUT requests.
+ */
+ if (!req->direction)
+ return true;
+
return req->request.actual == req->request.length;
}
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index ae70ce29d5e4..797d6ace8994 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -445,6 +445,7 @@ config USB_TEGRA_XUDC
tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PHY_TEGRA_XUSB
+ select USB_ROLE_SWITCH
help
Enables NVIDIA Tegra USB 3.0 device mode controller driver.
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 38183ac438c6..1371b0c249ec 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
- if (IS_ERR(da8xx_ohci->oc_gpio))
+ if (IS_ERR(da8xx_ohci->oc_gpio)) {
+ error = PTR_ERR(da8xx_ohci->oc_gpio);
goto err;
+ }
if (da8xx_ohci->oc_gpio) {
oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
- if (oc_irq < 0)
+ if (oc_irq < 0) {
+ error = oc_irq;
goto err;
+ }
error = devm_request_threaded_irq(dev, oc_irq, NULL,
ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5261f8dfedec..e3b8c84ccdb8 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
static int jz4740_musb_init(struct musb *musb)
{
struct device *dev = musb->controller->parent;
+ int err;
if (dev->of_node)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(musb->xceiv)) {
- dev_err(dev, "No transceiver configured\n");
- return PTR_ERR(musb->xceiv);
+ err = PTR_ERR(musb->xceiv);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No transceiver configured: %d", err);
+ return err;
}
/* Silicon does not implement ConfigData register.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 15cca912c53e..5ebf30bd61bd 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb);
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
@@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
@@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 5fc6825745f2..2d3751d885b4 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, 0,
- dev_name(musb->controller), &controller->controller)) {
+ dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e9491d400a24..2d919d0e6e45 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -567,6 +567,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
+/* Device needs ZLP */
+#define ZLP BIT(17)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1172,6 +1175,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1196,6 +1201,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2097,6 +2104,9 @@ static int option_attach(struct usb_serial *serial)
if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
+ if (device_flags & ZLP)
+ data->use_zlp = 1;
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 1c120eaf4091..934e9361cf6b 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -38,6 +38,7 @@ struct usb_wwan_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
unsigned int use_send_setup:1;
+ unsigned int use_zlp:1;
int in_flight;
unsigned int open_ports;
void *private;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 7e855c87e4f7..13be21aad2f4 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
@@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
+ if (intfdata->use_zlp && dir == USB_DIR_OUT)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
return urb;
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index c1f7073a56de..8b4ff9fff340 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
if (status & TCPC_ALERT_RX_STATUS) {
struct pd_message msg;
- unsigned int cnt;
+ unsigned int cnt, payload_cnt;
u16 header;
regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+ /*
+ * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+ * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+ * defined in table 4-36 as one greater than the number of
+ * bytes received. And that number includes the header. So:
+ */
+ if (cnt > 3)
+ payload_cnt = cnt - (1 + sizeof(msg.header));
+ else
+ payload_cnt = 0;
tcpci_read16(tcpci, TCPC_RX_HDR, &header);
msg.header = cpu_to_le16(header);
- if (WARN_ON(cnt > sizeof(msg.payload)))
- cnt = sizeof(msg.payload);
+ if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+ payload_cnt = sizeof(msg.payload);
- if (cnt > 0)
+ if (payload_cnt > 0)
regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
- &msg.payload, cnt);
+ &msg.payload, payload_cnt);
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
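Per the READABLE_BYTE_COUNT comment above, the register reports one more than the number of bytes received, and that count still includes the two-byte PD header, so the payload length is cnt - 3 once cnt exceeds 3. A quick check with an invented count:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int cnt = 11;			/* invented READABLE_BYTE_COUNT */
	unsigned int hdr = sizeof(uint16_t);	/* PD message header */
	unsigned int payload_cnt = cnt > 3 ? cnt - (1 + hdr) : 0;

	/* 11 - 1 - 2 = 8 payload bytes, i.e. two 32-bit data objects */
	printf("payload_cnt = %u\n", payload_cnt);
	return 0;
}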
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8569bbd3762f..831c9470bdc1 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR BIT(31)
#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
/* SET_UOR command bits */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1679e0dc869b..cec868f8db3f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
config MAX77620_WATCHDOG
tristate "Maxim Max77620 Watchdog Timer"
depends on MFD_MAX77620 || COMPILE_TEST
+ select WATCHDOG_CORE
help
This is the driver for the Max77620 watchdog timer.
Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
config TQMX86_WDT
tristate "TQ-Systems TQMX86 Watchdog Timer"
depends on X86
+ select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer in the TQMX86 IO
controller found on some of their ComExpress Modules.
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 0a87c6f4bab2..11b9e7c6b7f5 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- imx7ulp_wdt_enable(wdt->base, true);
+ imx7ulp_wdt_enable(wdog, true);
imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
/* wait for wdog to fire */
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 1cccf8eb1c5d..8e6dfe76f9c9 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
/* Request the IRQ only after the watchdog is disabled */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
/*
* Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
}
/* Optional 2nd interrupt for pretimeout */
- irq = platform_get_irq(pdev, 1);
+ irq = platform_get_irq_optional(pdev, 1);
if (irq > 0) {
orion_wdt_info.options |= WDIOF_PRETIMEOUT;
ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index 234876047431..6e524c8e26a8 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
module_platform_driver(rn5t618_wdt_driver);
+MODULE_ALIAS("platform:rn5t618-wdt");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 watchdog driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index fdf533fe0bb2..56a4a4030ca9 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
case NCT6116_ID:
- ret = nct6102;
+ ret = nct6116;
cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ee834ef7beb4..43e1660f450f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -447,7 +447,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
if (blkcg_css) {
bio->bi_opf |= REQ_CGROUP_PUNT;
- bio_associate_blkg_from_css(bio, blkcg_css);
+ kthread_associate_blkcg(blkcg_css);
}
refcount_set(&cb->pending_bios, 1);
@@ -491,6 +491,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
+ if (blkcg_css)
+ bio->bi_opf |= REQ_CGROUP_PUNT;
bio_add_page(bio, page, PAGE_SIZE, 0);
}
if (bytes_left < PAGE_SIZE) {
@@ -517,6 +519,9 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_endio(bio);
}
+ if (blkcg_css)
+ kthread_associate_blkcg(NULL);
+
return 0;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e3c76645cad7..5509c41a4f43 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1479,10 +1479,10 @@ next_slot:
disk_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
/*
- * If extent we got ends before our range starts, skip
- * to next extent
+ * If the extent we got ends before our current offset,
+ * skip to the next extent.
*/
- if (extent_end <= start) {
+ if (extent_end <= cur_offset) {
path->slots[0]++;
goto next_slot;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index d8c7242426bb..18a87ec8a465 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3031,11 +3031,9 @@ static void end_bio_bh_io_sync(struct bio *bio)
* errors, this only handles the "we need to be able to
* do IO at the final sector" case.
*/
-void guard_bio_eod(int op, struct bio *bio)
+void guard_bio_eod(struct bio *bio)
{
sector_t maxsector;
- struct bio_vec *bvec = bio_last_bvec_all(bio);
- unsigned truncated_bytes;
struct hd_struct *part;
rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
return;
- /* Uhhuh. We've got a bio that straddles the device size! */
- truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
- /*
- * The bio contains more than one segment which spans EOD, just return
- * and let IO layer turn it into an EIO
- */
- if (truncated_bytes > bvec->bv_len)
- return;
-
- /* Truncate the bio.. */
- bio->bi_iter.bi_size -= truncated_bytes;
- bvec->bv_len -= truncated_bytes;
-
- /* ..and clear the end of the buffer for reads */
- if (op == REQ_OP_READ) {
- struct bio_vec bv;
-
- mp_bvec_last_segment(bvec, &bv);
- zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
- truncated_bytes);
- }
+ bio_truncate(bio, maxsector << 9);
}
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -3118,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- /* Take care of bh's that straddle the end of the device */
- guard_bio_eod(op, bio);
-
if (buffer_meta(bh))
op_flags |= REQ_META;
if (buffer_prio(bh))
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ /* Take care of bh's that straddle the end of the device */
+ guard_bio_eod(bio);
+
if (wbc) {
wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 00dfe17871ac..c5e6eff5a381 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -352,7 +352,7 @@ static struct kobject *cdev_get(struct cdev *p)
if (owner && !try_module_get(owner))
return NULL;
- kobj = kobject_get(&p->kobj);
+ kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
module_put(owner);
return kobj;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0ec4f270139f..00b4d15bb811 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -39,6 +39,8 @@
#include <linux/atomic.h>
#include <linux/prefetch.h>
+#include "internal.h"
+
/*
* How many user pages to map in one call to get_user_pages(). This determines
* the size of a structure in the slab cache
diff --git a/fs/file.c b/fs/file.c
index 2f4fcf985079..3da91a112bab 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -960,7 +960,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
return ksys_dup3(oldfd, newfd, 0);
}
-SYSCALL_DEFINE1(dup, unsigned int, fildes)
+int ksys_dup(unsigned int fildes)
{
int ret = -EBADF;
struct file *file = fget_raw(fildes);
@@ -975,6 +975,11 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
return ret;
}
+SYSCALL_DEFINE1(dup, unsigned int, fildes)
+{
+ return ksys_dup(fildes);
+}
+
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
int err;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d5c2a3158610..a66e425884d1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1498,8 +1498,10 @@ static int __init init_hugetlbfs_fs(void)
/* other hstates are optional */
i = 0;
for_each_hstate(h) {
- if (i == default_hstate_idx)
+ if (i == default_hstate_idx) {
+ i++;
continue;
+ }
mnt = mount_one_hugetlbfs(h);
if (IS_ERR(mnt))
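The hugetlbfs hunk above fixes a hand-maintained index that fell out of sync: the bare "continue" skipped the increment, so every hstate after the default one was mounted under the wrong index. A hypothetical standalone reduction of the bug and its fix:

#include <stdio.h>

int main(void)
{
	const char *hstates[] = { "2MB", "1GB", "32MB" };
	int default_idx = 0;            /* mounted elsewhere, so skipped here */
	int i = 0;

	for (int n = 0; n < 3; n++) {
		if (i == default_idx) {
			i++;            /* without this, "1GB" would be reported
					 * as hstate 0 on the next iteration */
			continue;
		}
		printf("mounting hstate %d (%s)\n", i, hstates[n]);
		i++;
	}
	return 0;
}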
diff --git a/fs/internal.h b/fs/internal.h
index 4a7da1df573d..e3fa69544b66 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
/*
* buffer.c
*/
-extern void guard_bio_eod(int rw, struct bio *bio);
+extern void guard_bio_eod(struct bio *bio);
extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block, struct iomap *iomap);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 562e3a1a1bf9..38b54051facd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1863,18 +1863,6 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
else
ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
- /*
- * In case of a short read, punt to async. This can happen
- * if we have data partially cached. Alternatively we can
- * return the short read, in which case the application will
- * need to issue another SQE and wait for it. That SQE will
- * need async punt anyway, so it's more efficient to do it
- * here.
- */
- if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
- (req->flags & REQ_F_ISREG) &&
- ret2 > 0 && ret2 < io_size)
- ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
kiocb_done(kiocb, ret2, nxt, req->in_async);
diff --git a/fs/mpage.c b/fs/mpage.c
index a63620cdb73a..ccba3c4c4479 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
bio->bi_end_io = mpage_end_io;
bio_set_op_attrs(bio, op, op_flags);
- guard_bio_eod(op, bio);
+ guard_bio_eod(bio);
submit_bio(bio);
return NULL;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index be601d3a8008..5e1bf611a9eb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1728,7 +1728,7 @@ static bool is_mnt_ns_file(struct dentry *dentry)
dentry->d_fsdata == &mntns_operations;
}
-struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
return container_of(ns, struct mnt_namespace, ns);
}
diff --git a/fs/nsfs.c b/fs/nsfs.c
index a0431642c6b5..f75767bd623a 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -3,6 +3,7 @@
#include <linux/pseudo_fs.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/ktime.h>
@@ -11,6 +12,8 @@
#include <linux/nsfs.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
static struct vfsmount *nsfs_mnt;
static long ns_ioctl(struct file *filp, unsigned int ioctl,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 1c4c51f3df60..cda1027d0819 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
&dlm_debug->d_filter_secs);
+ ocfs2_get_dlm_debug(dlm_debug);
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 1afe57f425a0..68ba354cf361 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
+ if (replayed) {
+ jbd2_journal_lock_updates(journal->j_journal);
+ status = jbd2_journal_flush(journal->j_journal);
+ jbd2_journal_unlock_updates(journal->j_journal);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 84ad1c90d535..249672bf54fe 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -631,12 +631,15 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
/**
* posix_acl_update_mode - update mode in set_acl
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
*
* Update the file mode when setting an ACL: compute the new file permission
* bits based on the ACL. In addition, if the ACL is equivalent to the new
- * file mode, set *acl to NULL to indicate that no ACL should be set.
+ * file mode, set *@acl to NULL to indicate that no ACL should be set.
*
- * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
* or capable of CAP_FSETID (see inode_change_ok).
*
* Called from set_acl inode operations.
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 8caff834f002..013486b5125e 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
prz = cxt->dprzs[cxt->dump_write_cnt];
+ /*
+ * Since this is a new crash dump, we need to reset the buffer in
+ * case it still has an old dump present. Without this, the new dump
+ * will get appended, which would seriously confuse anything trying
+ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
+ * expects to find a dump header in the beginning of buffer data, so
+ * we must reset the buffer values in order to ensure that the
+ * header will be written to the beginning of the buffer.
+ */
+ persistent_ram_zap(prz);
+
/* Build header and append record contents. */
hlen = ramoops_write_kmsg_hdr(prz, record);
if (!hlen)
@@ -572,6 +583,7 @@ static int ramoops_init_przs(const char *name,
prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
&cxt->ecc_info,
cxt->memtype, flags, label);
+ kfree(label);
if (IS_ERR(prz_ar[i])) {
err = PTR_ERR(prz_ar[i]);
dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -617,6 +629,7 @@ static int ramoops_init_prz(const char *name,
label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
+ kfree(label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 8823f65888f0..1f4d8c06f9be 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -574,7 +574,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
/* Initialize general buffer state. */
raw_spin_lock_init(&prz->buffer_lock);
prz->flags = flags;
- prz->label = label;
+ prz->label = kstrdup(label, GFP_KERNEL);
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
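The ramoops hunks above change the label's ownership rule: persistent_ram_new() now kstrdup()s the string it is given, and both callers kfree() their own kasprintf() buffer unconditionally, which closes the leak on the error paths. A hypothetical userspace sketch of the same "callee copies, caller always frees" contract:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct zone {
	char *label;
};

/* The constructor keeps a private copy, so it never takes ownership
 * of the caller's buffer. */
static struct zone *zone_new(const char *label)
{
	struct zone *z = calloc(1, sizeof(*z));

	if (!z)
		return NULL;
	z->label = strdup(label);
	return z;
}

int main(void)
{
	char *label = strdup("ramoops:dmesg");
	struct zone *z;

	if (!label)
		return 1;
	z = zone_new(label);

	free(label);                    /* caller frees its copy unconditionally */

	if (z) {
		printf("%s\n", z->label);
		free(z->label);
		free(z);
	}
	return 0;
}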
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 3cdb84cdc488..853d92ceee64 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
static inline void zero_fill_bio(struct bio *bio)
{
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 679a42253170..a81c13ac1972 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
- struct bio_vec *seg)
-{
- unsigned total = bvec->bv_offset + bvec->bv_len;
- unsigned last_page = (total - 1) / PAGE_SIZE;
-
- seg->bv_page = bvec->bv_page + last_page;
-
- /* the whole segment is inside the last page */
- if (bvec->bv_offset >= last_page * PAGE_SIZE) {
- seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
- seg->bv_len = bvec->bv_len;
- } else {
- seg->bv_offset = 0;
- seg->bv_len = total - last_page * PAGE_SIZE;
- }
-}
-
#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 9b3c720a31b1..5e3d45525bd3 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -18,6 +18,7 @@
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
#include <linux/netdevice.h>
/*
@@ -91,6 +92,36 @@ struct can_priv {
#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
@@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
} else
goto inval_skb;
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
return false;
inval_skb:
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fcdee1c0cf9..dad4a68fa009 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1364,8 +1364,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 76cf11e905e1..8a9792a6427a 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3adcb39fa6f5..0d9db2a14f44 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -79,15 +79,6 @@
*/
#define round_down(x, y) ((x) & ~__round_mask(x, y))
-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-
#define typeof_member(T, m) typeof(((T*)0)->m)
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 3a08ecdfca11..ba0dca6aac6e 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -122,8 +122,8 @@ static inline bool movable_node_is_enabled(void)
extern void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -342,6 +342,9 @@ extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index f84b9163c0ee..7dfb63b81373 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -46,6 +46,14 @@
#define RTC_AL_SEC 0x0018
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index ecc88a41792a..c04f690871ca 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -40,7 +40,7 @@ typedef enum {
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
- FL_RESETING,
+ FL_RESETTING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 79bc82e30c02..491a2b7e77c1 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -55,7 +55,7 @@ static inline int of_mdio_parse_addr(struct device *dev,
}
#else /* CONFIG_OF_MDIO */
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
{
return false;
}
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index fe6cfdcfbc26..468328b1e1dd 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -69,29 +69,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
- * @kref: Reference count.
+ * @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- * zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage the lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
- struct kref kref;
+ struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
- void (*release)(struct posix_clock *clk);
};
/**
* posix_clock_register() - register a new clock
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
+ * @dev: Pointer to the initialized device. Caller must provide
+ * 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -100,7 +103,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
/**
* posix_clock_unregister() - unregister a clock
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 98fe8663033a..3a67a7e45633 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -689,10 +689,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
/* Helper calls for driver to timestamp transfer */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off);
+ size_t progress, bool irqs_off);
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off);
+ size_t progress, bool irqs_off);
/* the spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
index 85ec745767bd..966146f7267a 100644
--- a/include/linux/sxgbe_platform.h
+++ b/include/linux/sxgbe_platform.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * 10G controller driver for Samsung EXYNOS SoCs
+ * 10G controller driver for Samsung Exynos SoCs
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2960dedcfde8..5262b7a76d39 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1232,6 +1232,7 @@ asmlinkage long sys_ni_syscall(void);
*/
int ksys_umount(char __user *name, int flags);
+int ksys_dup(unsigned int fildes);
int ksys_chroot(const char __user *filename);
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
int ksys_chdir(const char __user *filename);
diff --git a/include/net/dst.h b/include/net/dst.h
index 8224dad2ae94..3448cf865ede 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops->update_pmtu)
- dst->ops->update_pmtu(dst, NULL, skb, mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but skip neighbor confirmation */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
u32 encap_mtu = dst_mtu(encap_dst);
if (skb->len > encap_mtu - headroom)
- skb_dst_update_pmtu(skb, encap_mtu - headroom);
+ skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
}
#endif /* _NET_DST_H */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 5ec645f27ee3..443863c7b8da 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -27,7 +27,8 @@ struct dst_ops {
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index f0897b3c97fb..415b8f49d150 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -106,6 +106,12 @@ struct flow_offload {
};
#define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - nf_flowtable_time_stamp);
+}
struct nf_flow_route {
struct {
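nf_flow_timeout_delta() above is the usual signed-difference idiom for tick comparisons: subtracting in unsigned arithmetic and casting to a signed type gives the correct "before or after" answer even when the counter wraps. A hypothetical userspace demonstration:

#include <stdio.h>
#include <stdint.h>

static int32_t timeout_delta(uint32_t timeout, uint32_t now)
{
	return (int32_t)(timeout - now);        /* wrap-safe signed distance */
}

int main(void)
{
	uint32_t now = 0xfffffff0u;             /* counter about to wrap */
	uint32_t timeout = now + 0x40;          /* expires after the wrap */

	printf("%d\n", timeout_delta(timeout, now));    /* 64: still pending */
	printf("%d\n", timeout_delta(now - 10, now));   /* -10: already expired */
	return 0;
}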
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 144f264ea394..fceddf89592a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
int (*delete)(struct tcf_proto *tp, void *arg,
bool *last, bool rtnl_held,
struct netlink_ext_ack *);
+ bool (*delete_empty)(struct tcf_proto *tp);
void (*walk)(struct tcf_proto *tp,
struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
@@ -336,6 +337,10 @@ struct tcf_proto_ops {
int flags;
};
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
enum tcf_proto_ops_flags {
TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 753f54e17e0a..e3518fd6b95b 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -69,7 +69,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
@@ -83,7 +83,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
#include <linux/err.h>
-static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
+static inline struct ib_umem *ib_umem_get(struct ib_device *device,
unsigned long addr, size_t size,
int access)
{
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 81429acc8257..64314ff76612 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -114,9 +114,9 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
int access, const struct mmu_interval_notifier_ops *ops);
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
@@ -134,7 +134,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
int access, const struct mmu_interval_notifier_ops *ops)
{
return ERR_PTR(-EINVAL);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6506df9f31ae..1f779fad3a1e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4166,6 +4166,15 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
+/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
+ * space. This function should be called when 'current' is the owning MM.
+ */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags);
+
+/* ib_advise_mr - give an advice about an address range in a memory region */
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge);
/**
* ib_dereg_mr_user - Deregisters a memory region and removes it from the
* HCA translation table.
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
index 04f6748fc50b..92ade10ed67e 100644
--- a/arch/riscv/include/asm/sifive_l2_cache.h
+++ b/include/soc/sifive/sifive_l2_cache.h
@@ -4,8 +4,8 @@
*
*/
-#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
-#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+#ifndef __SOC_SIFIVE_L2_CACHE_H
+#define __SOC_SIFIVE_L2_CACHE_H
extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
@@ -13,4 +13,4 @@ extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
#define SIFIVE_L2_ERR_TYPE_CE 0
#define SIFIVE_L2_ERR_TYPE_UE 1
-#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
+#endif /* __SOC_SIFIVE_L2_CACHE_H */
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 95fba0471e5b..3f249e150c0c 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template,
TP_ARGS(ip, parent_ip),
TP_STRUCT__entry(
- __field(u32, caller_offs)
- __field(u32, parent_offs)
+ __field(s32, caller_offs)
+ __field(s32, parent_offs)
),
TP_fast_assign(
- __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
- __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ __entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
),
TP_printk("caller=%pS parent=%pS",
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index f056b2a00d5c..9a61c28ed3ae 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -34,6 +34,7 @@ struct input_event {
__kernel_ulong_t __sec;
#if defined(__sparc__) && defined(__arch64__)
unsigned int __usec;
+ unsigned int __pad;
#else
__kernel_ulong_t __usec;
#endif
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 409d3ad1e6e2..1d0350e44ae3 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -9,11 +9,11 @@
* and the comment before kcov_remote_start() for usage details.
*/
struct kcov_remote_arg {
- unsigned int trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
- unsigned int area_size; /* Length of coverage buffer in words */
- unsigned int num_handles; /* Size of handles array */
- __u64 common_handle;
- __u64 handles[0];
+ __u32 trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+ __u32 area_size; /* Length of coverage buffer in words */
+ __u32 num_handles; /* Size of handles array */
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
};
#define KCOV_REMOTE_MAX_HANDLES 0x100
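The kcov_remote_arg change above switches the UAPI struct to fixed-width, explicitly aligned types so 32-bit and 64-bit userspace agree on its layout: three 32-bit fields followed by a plain 64-bit one are padded differently on i386 and x86-64. A hypothetical demonstration (compare a native build with a -m32 build):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct remote_arg_loose {
	uint32_t trace_mode, area_size, num_handles;
	uint64_t common_handle;         /* offset 12 on i386, 16 on x86-64 */
};

struct remote_arg_fixed {
	uint32_t trace_mode, area_size, num_handles;
	uint64_t common_handle __attribute__((aligned(8)));    /* offset 16 everywhere */
};

int main(void)
{
	printf("loose: offset %zu size %zu\n",
	       offsetof(struct remote_arg_loose, common_handle),
	       sizeof(struct remote_arg_loose));
	printf("fixed: offset %zu size %zu\n",
	       offsetof(struct remote_arg_fixed, common_handle),
	       sizeof(struct remote_arg_fixed));
	return 0;
}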
diff --git a/init/main.c b/init/main.c
index 1ecfd43ed464..2cd736059416 100644
--- a/init/main.c
+++ b/init/main.c
@@ -93,7 +93,6 @@
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
-#include <linux/file.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -1158,26 +1157,13 @@ static int __ref kernel_init(void *unused)
void console_on_rootfs(void)
{
- struct file *file;
- unsigned int i;
-
- /* Open /dev/console in kernelspace, this should never fail */
- file = filp_open("/dev/console", O_RDWR, 0);
- if (IS_ERR(file))
- goto err_out;
-
- /* create stdin/stdout/stderr, this should never fail */
- for (i = 0; i < 3; i++) {
- if (f_dupfd(i, file, 0) != i)
- goto err_out;
- }
-
- return;
+ /* Open /dev/console as stdin, this should never fail */
+ if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ pr_err("Warning: unable to open an initial console.\n");
-err_out:
- /* no panic -- this might not be fatal */
- pr_err("Warning: unable to open an initial console.\n");
- return;
+ /* create stdout/stderr */
+ (void) ksys_dup(0);
+ (void) ksys_dup(0);
}
static noinline void __init kernel_init_freeable(void)
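console_on_rootfs() above now opens /dev/console through ksys_open() and duplicates descriptor 0 twice with the new ksys_dup() helper instead of juggling a struct file by hand. A hypothetical userspace analogue of the same sequence (in a fresh task with no files open, the open() returns descriptor 0, so the two dup() calls become stdout and stderr):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/console", O_RDWR);

	if (fd < 0) {
		perror("unable to open an initial console");
		return 1;
	}
	(void)dup(fd);          /* stdout */
	(void)dup(fd);          /* stderr */
	return 0;
}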
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 4fb20ab179fe..9e43b72eb619 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
*/
static void cgroup_bpf_release(struct work_struct *work)
{
- struct cgroup *cgrp = container_of(work, struct cgroup,
- bpf.release_work);
+ struct cgroup *p, *cgrp = container_of(work, struct cgroup,
+ bpf.release_work);
enum bpf_cgroup_storage_type stype;
struct bpf_prog_array *old_array;
unsigned int type;
@@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work)
mutex_unlock(&cgroup_mutex);
+ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ cgroup_bpf_put(p);
+
percpu_ref_exit(&cgrp->bpf.refcnt);
cgroup_put(cgrp);
}
@@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
*/
#define NR ARRAY_SIZE(cgrp->bpf.effective)
struct bpf_prog_array *arrays[NR] = {};
+ struct cgroup *p;
int ret, i;
ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
@@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
if (ret)
return ret;
+ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ cgroup_bpf_get(p);
+
for (i = 0; i < NR; i++)
INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4983940cbdca..ce85e7041f0c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -907,7 +907,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
-static void __mark_reg_not_init(struct bpf_reg_state *reg);
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
* known to have the value @imm.
@@ -945,7 +946,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
- __mark_reg_not_init(regs + regno);
+ __mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
@@ -1054,7 +1055,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
}
/* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg)
{
/*
* Clear type, id, off, and union(map_ptr, range) and
@@ -1064,6 +1066,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
reg->type = SCALAR_VALUE;
reg->var_off = tnum_unknown;
reg->frameno = 0;
+ reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+ true : false;
__mark_reg_unbounded(reg);
}
@@ -1074,19 +1078,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
- __mark_reg_not_init(regs + regno);
+ __mark_reg_not_init(env, regs + regno);
return;
}
- regs += regno;
- __mark_reg_unknown(regs);
- /* constant backtracking is enabled for root without bpf2bpf calls */
- regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
- true : false;
+ __mark_reg_unknown(env, regs + regno);
}
-static void __mark_reg_not_init(struct bpf_reg_state *reg)
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg)
{
- __mark_reg_unknown(reg);
+ __mark_reg_unknown(env, reg);
reg->type = NOT_INIT;
}
@@ -1097,10 +1098,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
- __mark_reg_not_init(regs + regno);
+ __mark_reg_not_init(env, regs + regno);
return;
}
- __mark_reg_not_init(regs + regno);
+ __mark_reg_not_init(env, regs + regno);
}
#define DEF_NOT_SUBREG (0)
@@ -3234,7 +3235,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
- __mark_reg_unknown(&state->stack[spi].spilled_ptr);
+ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
state->stack[spi].slot_type[j] = STACK_MISC;
goto mark;
@@ -3892,7 +3893,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
if (!reg)
continue;
if (reg_is_pkt_pointer_any(reg))
- __mark_reg_unknown(reg);
+ __mark_reg_unknown(env, reg);
}
}
@@ -3920,7 +3921,7 @@ static void release_reg_references(struct bpf_verifier_env *env,
if (!reg)
continue;
if (reg->ref_obj_id == ref_obj_id)
- __mark_reg_unknown(reg);
+ __mark_reg_unknown(env, reg);
}
}
@@ -4582,7 +4583,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
- __mark_reg_unknown(dst_reg);
+ __mark_reg_unknown(env, dst_reg);
return 0;
}
@@ -4834,13 +4835,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
- __mark_reg_unknown(dst_reg);
+ __mark_reg_unknown(env, dst_reg);
return 0;
}
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
- __mark_reg_unknown(dst_reg);
+ __mark_reg_unknown(env, dst_reg);
return 0;
}
@@ -6263,6 +6264,7 @@ static bool may_access_skb(enum bpf_prog_type type)
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
+ static const int ctx_reg = BPF_REG_6;
u8 mode = BPF_MODE(insn->code);
int i, err;
@@ -6296,7 +6298,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check whether implicit source operand (register R6) is readable */
- err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+ err = check_reg_arg(env, ctx_reg, SRC_OP);
if (err)
return err;
@@ -6315,7 +6317,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
- if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+ if (regs[ctx_reg].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
@@ -6328,6 +6330,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
+ err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+ if (err < 0)
+ return err;
+
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
@@ -6982,7 +6988,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
/* since the register is unused, clear its state
* to make further comparison simpler
*/
- __mark_reg_not_init(&st->regs[i]);
+ __mark_reg_not_init(env, &st->regs[i]);
}
for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
@@ -6990,7 +6996,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
/* liveness must not touch this stack slot anymore */
st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
if (!(live & REG_LIVE_READ)) {
- __mark_reg_not_init(&st->stack[i].spilled_ptr);
+ __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
st->stack[i].slot_type[j] = STACK_INVALID;
}
diff --git a/kernel/cred.c b/kernel/cred.c
index c0a4c12d38b2..9ed51b70ed80 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void)
new->magic = CRED_MAGIC;
#endif
- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
return new;
@@ -282,7 +282,7 @@ struct cred *prepare_creds(void)
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
validate_creds(new);
return new;
@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
put_cred(old);
diff --git a/kernel/exit.c b/kernel/exit.c
index bcbd59888e67..2833ffb0c211 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
}
write_unlock_irq(&tasklist_lock);
- if (unlikely(pid_ns == &init_pid_ns)) {
- panic("Attempted to kill init! exitcode=0x%08x\n",
- father->signal->group_exit_code ?: father->exit_code);
- }
list_for_each_entry_safe(p, n, dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);
@@ -766,6 +762,14 @@ void __noreturn do_exit(long code)
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+ /*
+ * If the last thread of global init has exited, panic
+ * immediately to get a useable coredump.
+ */
+ if (unlikely(is_global_init(tsk)))
+ panic("Attempted to kill init! exitcode=0x%08x\n",
+ tsk->signal->group_exit_code ?: (int)code);
+
#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
diff --git a/kernel/fork.c b/kernel/fork.c
index 2508a4f238a3..080809560072 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2578,6 +2578,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
#endif
#ifdef __ARCH_WANT_SYS_CLONE3
+
+/*
+ * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from
+ * the registers containing the syscall arguments for clone. This doesn't work
+ * with clone3 since the TLS value is passed in clone_args instead.
+ */
+#ifndef CONFIG_HAVE_COPY_THREAD_TLS
+#error clone3 requires copy_thread_tls support in arch
+#endif
+
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
struct clone_args __user *uargs,
size_t usize)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 12d2227e5786..b6ea3dcb57bf 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1026,6 +1026,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter,
struct seccomp_notif unotif;
ssize_t ret;
+ /* Verify that we're not given garbage to keep struct extensible. */
+ ret = check_zeroed_user(buf, sizeof(unotif));
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -EINVAL;
+
memset(&unotif, 0, sizeof(unotif));
ret = down_interruptible(&filter->notif->request);
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 13a0f2e6ebc2..e2ac0e37c4ae 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
- struct taskstats *stats;
+ struct taskstats *stats_new, *stats;
- if (sig->stats || thread_group_empty(tsk))
- goto ret;
+ /* Pairs with smp_store_release() below. */
+ stats = smp_load_acquire(&sig->stats);
+ if (stats || thread_group_empty(tsk))
+ return stats;
/* No problem if kmem_cache_zalloc() fails */
- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
spin_lock_irq(&tsk->sighand->siglock);
- if (!sig->stats) {
- sig->stats = stats;
- stats = NULL;
+ stats = sig->stats;
+ if (!stats) {
+ /*
+ * Pairs with smp_load_acquire() above and orders the
+ * kmem_cache_zalloc().
+ */
+ smp_store_release(&sig->stats, stats_new);
+ stats = stats_new;
+ stats_new = NULL;
}
spin_unlock_irq(&tsk->sighand->siglock);
- if (stats)
- kmem_cache_free(taskstats_cache, stats);
-ret:
- return sig->stats;
+ if (stats_new)
+ kmem_cache_free(taskstats_cache, stats_new);
+
+ return stats;
}
/* Send pid data out on exit */
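The taskstats hunk above turns a racy check-then-allocate sequence into a double-checked publish: the lockless fast path reads the pointer with smp_load_acquire(), and the writer installs the fully zeroed object with smp_store_release() under the siglock, so a reader that sees the pointer also sees its contents. A generic kernel-style sketch of the pattern (struct and field names are hypothetical, not the taskstats code):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>

struct obj {
	int payload;
};

struct state {
	spinlock_t lock;
	struct obj *obj;
};

static struct obj *get_or_alloc(struct state *st)
{
	struct obj *o, *o_new;

	/* Fast path: pairs with the smp_store_release() below. */
	o = smp_load_acquire(&st->obj);
	if (o)
		return o;

	o_new = kzalloc(sizeof(*o_new), GFP_KERNEL);

	spin_lock(&st->lock);
	o = st->obj;
	if (!o && o_new) {
		/* Publish: orders the zeroed contents before the pointer. */
		smp_store_release(&st->obj, o_new);
		o = o_new;
		o_new = NULL;
	}
	spin_unlock(&st->lock);

	kfree(o_new);           /* NULL, or the copy that lost the race */
	return o;
}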
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index ec960bb939fd..200fb2d3be99 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -14,8 +14,6 @@
#include "posix-timers.h"
-static void delete_clock(struct kref *kref);
-
/*
* Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
*/
@@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
err = 0;
if (!err) {
- kref_get(&clk->kref);
+ get_device(clk->dev);
fp->private_data = clk;
}
out:
@@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
if (clk->ops.release)
err = clk->ops.release(clk);
- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
fp->private_data = NULL;
@@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
#endif
};
-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
{
int err;
- kref_init(&clk->kref);
init_rwsem(&clk->rwsem);
cdev_init(&clk->cdev, &posix_clock_file_operations);
+ err = cdev_device_add(&clk->cdev, dev);
+ if (err) {
+ pr_err("%s unable to add device %d:%d\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return err;
+ }
clk->cdev.owner = clk->ops.owner;
- err = cdev_add(&clk->cdev, devid, 1);
+ clk->dev = dev;
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(posix_clock_register);
-static void delete_clock(struct kref *kref)
-{
- struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
- if (clk->release)
- clk->release(clk);
-}
-
void posix_clock_unregister(struct posix_clock *clk)
{
- cdev_del(&clk->cdev);
+ cdev_device_del(&clk->cdev, clk->dev);
down_write(&clk->rwsem);
clk->zombie = true;
up_write(&clk->rwsem);
- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
}
EXPORT_SYMBOL_GPL(posix_clock_unregister);
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index a2659735db73..1af321dec0f1 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -96,6 +96,20 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
return 0;
}
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+# error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
int function_graph_enter(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp)
{
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ac99a3500076..9bf1f2cd515e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v)
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- avg = rec->time;
- do_div(avg, rec->counter);
+ avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
goto out;
#endif
@@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v)
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev,
+ rec->counter * (rec->counter - 1) * 1000);
}
trace_seq_init(&s);
diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index d45079ee62f8..22bcf7c51d1e 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -195,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
unsigned long irq_flags;
void *entry = NULL;
int entry_size;
- u64 val;
+ u64 val = 0;
int len;
entry = trace_alloc_entry(call, &entry_size);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5e43b9664eca..617e297f46dc 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
- return;
+ goto fail_deprobe_sched_switch;
}
wakeup_reset(tr);
@@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
+fail_deprobe_sched_switch:
+ unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
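The wakeup-tracer hunk above adds the unwind label that was missing: when the third tracepoint registration fails, the second one is now unregistered before bailing out. A minimal standalone sketch of the unwind-in-reverse-order idiom (all functions hypothetical):

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; }   /* simulated failure */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int start_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto fail;
	ret = setup_b();
	if (ret)
		goto fail_undo_a;
	ret = setup_c();
	if (ret)
		goto fail_undo_b;       /* the label the original code lacked */
	return 0;

fail_undo_b:
	undo_b();
fail_undo_a:
	undo_a();
fail:
	return ret;
}

int main(void)
{
	return start_all() ? 1 : 0;
}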
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 344e4c1aa09c..87de6edafd14 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -381,7 +381,7 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
- unsigned int save_len = s->seq.len;
+ unsigned int save_len = s->seq.len;
if (s->full)
return 0;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 4df9a209f7ca..c557f42a9397 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack)
local_irq_restore(flags);
}
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 7dd602d7f8db..ad9d5b1c4473 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
unsigned long i, nr_pages, addr, next;
int nr;
struct page **pages;
+ int ret = 0;
if (gup->size > ULONG_MAX)
return -EINVAL;
@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
NULL);
break;
default:
- return -1;
+ kvfree(pages);
+ ret = -EINVAL;
+ goto out;
}
if (nr <= 0)
@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
gup->put_delta_usec = ktime_us_delta(end_time, start_time);
kvfree(pages);
- return 0;
+out:
+ return ret;
}
static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac65bb5e38ac..dd8737a94bec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,7 @@
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
+#include <linux/llist.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
page[2].mapping = NULL;
}
-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
@@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page)
spin_unlock(&hugetlb_lock);
}
+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing in a workqueue to prevent potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+ struct llist_node *node;
+ struct page *page;
+
+ node = llist_del_all(&hpage_freelist);
+
+ while (node) {
+ page = container_of((struct address_space **)node,
+ struct page, mapping);
+ node = node->next;
+ __free_huge_page(page);
+ }
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+ /*
+ * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+ */
+ if (!in_task()) {
+ /*
+ * Only call schedule_work() if hpage_freelist is previously
+ * empty. Otherwise, schedule_work() has already been called but the
+ * workfn hasn't retrieved the list yet.
+ */
+ if (llist_add((struct llist_node *)&page->mapping,
+ &hpage_freelist))
+ schedule_work(&free_hpage_work);
+ return;
+ }
+
+ __free_huge_page(page);
+}
+
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
INIT_LIST_HEAD(&page->lru);
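free_huge_page() above can now be reached from non-task context, so the real work is deferred to a workqueue by threading the pages onto a lockless llist (reusing page->mapping as the llist_node). A generic kernel-style sketch of the same defer-to-workqueue pattern, with a dedicated node and hypothetical names instead of the page->mapping trick:

#include <linux/llist.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_obj {
	struct llist_node node;
	/* ... payload ... */
};

static LLIST_HEAD(free_list);

static void free_workfn(struct work_struct *work)
{
	struct llist_node *head = llist_del_all(&free_list);
	struct deferred_obj *obj, *tmp;

	llist_for_each_entry_safe(obj, tmp, head, node)
		kfree(obj);
}
static DECLARE_WORK(free_work, free_workfn);

static void obj_free(struct deferred_obj *obj)
{
	if (!in_task()) {
		/* llist_add() returns true only on the empty -> non-empty
		 * transition, so the work is queued once per batch. */
		if (llist_add(&obj->node, &free_list))
			schedule_work(&free_work);
		return;
	}
	kfree(obj);
}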
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 55ac23ef11c1..a91a072f2b2c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -483,8 +483,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages)
{
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long flags;
@@ -499,28 +500,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
return;
#endif
+ clear_zone_contiguous(zone);
+
pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
update_pgdat_span(pgdat);
pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+ set_zone_contiguous(zone);
}
-static void __remove_section(struct zone *zone, unsigned long pfn,
- unsigned long nr_pages, unsigned long map_offset,
- struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+ unsigned long map_offset,
+ struct vmem_altmap *altmap)
{
struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
if (WARN_ON_ONCE(!valid_section(ms)))
return;
- __remove_zone(zone, pfn, nr_pages);
sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}
/**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
* @pfn: starting pageframe (must be aligned to start of a section)
* @nr_pages: number of pages to remove (must be multiple of section size)
* @altmap: alternative device page map or %NULL if default memmap is used
@@ -530,16 +533,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
* sure that pages are marked reserved and zones are adjusted properly by
* calling offline_pages().
*/
-void __remove_pages(struct zone *zone, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
{
unsigned long map_offset = 0;
unsigned long nr, start_sec, end_sec;
map_offset = vmem_altmap_offset(altmap);
- clear_zone_contiguous(zone);
-
if (check_pfn_span(pfn, nr_pages, "remove"))
return;
@@ -551,13 +552,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
cond_resched();
pfns = min(nr_pages, PAGES_PER_SECTION
- (pfn & ~PAGE_SECTION_MASK));
- __remove_section(zone, pfn, pfns, map_offset, altmap);
+ __remove_section(pfn, pfns, map_offset, altmap);
pfn += pfns;
nr_pages -= pfns;
map_offset = 0;
}
-
- set_zone_contiguous(zone);
}
int set_online_page_callback(online_page_callback_t callback)
@@ -869,6 +868,7 @@ failed_addition:
(unsigned long long) pfn << PAGE_SHIFT,
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
+ remove_pfn_range_from_zone(zone, pfn, nr_pages);
mem_hotplug_done();
return ret;
}
@@ -1628,6 +1628,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
writeback_set_ratelimit();
memory_notify(MEM_OFFLINE, &arg);
+ remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
mem_hotplug_done();
return 0;
diff --git a/mm/memremap.c b/mm/memremap.c
index 03ccbdfeb697..c51c6bd2fe34 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
mem_hotplug_begin();
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+ __remove_pages(PHYS_PFN(res->start),
PHYS_PFN(resource_size(res)), NULL);
} else {
arch_remove_memory(nid, res->start, resource_size(res),
diff --git a/mm/migrate.c b/mm/migrate.c
index eae1565285e3..86873b6f38a7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1512,9 +1512,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
/*
* Resolves the given address to a struct page, isolates it from the LRU and
* puts it to the given pagelist.
- * Returns -errno if the page cannot be found/isolated or 0 when it has been
- * queued or the page doesn't need to be migrated because it is already on
- * the target node
+ * Returns:
+ * errno - if the page cannot be found/isolated
+ * 0 - when it doesn't have to be migrated because it is already on the
+ * target node
+ * 1 - when it has been queued
*/
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
int node, struct list_head *pagelist, bool migrate_all)
@@ -1553,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
if (PageHuge(page)) {
if (PageHead(page)) {
isolate_huge_page(page, pagelist);
- err = 0;
+ err = 1;
}
} else {
struct page *head;
@@ -1563,7 +1565,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
if (err)
goto out_putpage;
- err = 0;
+ err = 1;
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
@@ -1640,8 +1642,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
*/
err = add_page_for_migration(mm, addr, current_node,
&pagelist, flags & MPOL_MF_MOVE_ALL);
- if (!err)
+
+ if (!err) {
+ /* The page is already on the target node */
+ err = store_status(status, i, current_node, 1);
+ if (err)
+ goto out_flush;
continue;
+ } else if (err > 0) {
+ /* The page is successfully queued for migration */
+ continue;
+ }
err = store_status(status, i, err, 1);
if (err)
diff --git a/mm/mmap.c b/mm/mmap.c
index 9c648524e4dc..71e4ffc83bcd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (copy) copy w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 71e3acea7817..d58c481b3df8 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
K(get_mm_counter(mm, MM_FILEPAGES)),
K(get_mm_counter(mm, MM_SHMEMPAGES)),
from_kuid(&init_user_ns, task_uid(victim)),
- mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
+ mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
task_unlock(victim);
/*
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2b2b9aae8a3c..22d17ecfe7df 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
zs_pool_dec_isolated(pool);
}
+ if (page_zone(newpage) != page_zone(page)) {
+ dec_zone_page_state(page, NR_ZSPAGES);
+ inc_zone_page_state(newpage, NR_ZSPAGES);
+ }
+
reset_page(page);
put_page(page);
page = newpage;
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index c46daf09a501..bb7ec1a3915d 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -126,6 +126,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e5bff5cc6f97..2a78da4072de 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -586,7 +586,8 @@ static int vlan_dev_init(struct net_device *dev)
return 0;
}
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index c482a6fe9393..0db85aeb119b 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -108,11 +108,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
- int rem;
+ int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ if (err)
+ return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -123,7 +125,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
- vlan_dev_set_egress_priority(dev, m->from, m->to);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+ return err;
}
}
return 0;
@@ -179,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
-
- return register_vlan_dev(dev, extack);
+ if (!err)
+ err = register_vlan_dev(dev, extack);
+ if (err)
+ vlan_dev_uninit(dev);
+ return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 2cdfc5d6c25d..8c69f0c95a8e 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -22,7 +22,8 @@
#endif
static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4096d8a74a2b..e1256e03a9a8 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
}
static int ebt_buf_add(struct ebt_entries_buf_state *state,
- void *data, unsigned int sz)
+ const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
@@ -1901,7 +1901,7 @@ enum compat_mwt {
EBT_COMPAT_TARGET,
};
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
@@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
/* return size of all matches, watchers or target, including necessary
* alignment and padding.
*/
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
+ const char *buf = (const char *)match32;
int growth = 0;
- char *buf;
if (size_left == 0)
return 0;
- buf = (char *) match32;
-
- while (size_left >= sizeof(*match32)) {
+ do {
struct ebt_entry_match *match_kern;
int ret;
+ if (size_left < sizeof(*match32))
+ return -EINVAL;
+
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
@@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- /* rule should have no remaining data after target */
- if (type == EBT_COMPAT_TARGET && size_left)
- return -EINVAL;
-
match32 = (struct compat_ebt_entry_mwt *) buf;
- }
+ } while (size_left);
return growth;
}
/* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
- unsigned int i, j, startoff, new_offset = 0;
+ unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
@@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
}
- startoff = state->buf_user_offset - startoff;
+ next_expected_off = state->buf_user_offset - startoff;
+ if (next_expected_off != entry->next_offset)
+ return -EINVAL;
- if (WARN_ON(*total < startoff))
+ if (*total < entry->next_offset)
return -EINVAL;
- *total -= startoff;
+ *total -= entry->next_offset;
return 0;
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index aea918135ec3..08c3dc45f1a4 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb , u32 mtu);
+ struct sk_buff *skb , u32 mtu,
+ bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
* advertise to the other end).
*/
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct dn_route *rt = (struct dn_route *) dst;
struct neighbour *n = rt->n;
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 94447974a3c0..d5f709b940ff 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -20,6 +20,8 @@
#include "hsr_main.h"
#include "hsr_framereg.h"
+static struct dentry *hsr_debugfs_root_dir;
+
static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
{
seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
@@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
return single_open(filp, hsr_node_table_show, inode->i_private);
}
+void hsr_debugfs_rename(struct net_device *dev)
+{
+ struct hsr_priv *priv = netdev_priv(dev);
+ struct dentry *d;
+
+ d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
+ hsr_debugfs_root_dir, dev->name);
+ if (IS_ERR(d))
+ netdev_warn(dev, "failed to rename\n");
+ else
+ priv->node_tbl_root = d;
+}
+
static const struct file_operations hsr_fops = {
- .owner = THIS_MODULE,
.open = hsr_node_table_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = {
* When debugfs is configured this routine sets up the node_table file per
* hsr device for dumping the node_table entries
*/
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
{
- int rc = -1;
struct dentry *de = NULL;
- de = debugfs_create_dir(hsr_dev->name, NULL);
- if (!de) {
- pr_err("Cannot create hsr debugfs root\n");
- return rc;
+ de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr debugfs directory\n");
+ return;
}
priv->node_tbl_root = de;
@@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
de = debugfs_create_file("node_table", S_IFREG | 0444,
priv->node_tbl_root, priv,
&hsr_fops);
- if (!de) {
- pr_err("Cannot create hsr node_table directory\n");
- return rc;
+ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr node_table file\n");
+ debugfs_remove(priv->node_tbl_root);
+ priv->node_tbl_root = NULL;
+ return;
}
priv->node_tbl_file = de;
-
- return 0;
}
/* hsr_debugfs_term - Tear down debugfs infrastructure
@@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv)
debugfs_remove(priv->node_tbl_root);
priv->node_tbl_root = NULL;
}
+
+void hsr_debugfs_create_root(void)
+{
+ hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
+ if (IS_ERR(hsr_debugfs_root_dir)) {
+ pr_err("Cannot create hsr debugfs root directory\n");
+ hsr_debugfs_root_dir = NULL;
+ }
+}
+
+void hsr_debugfs_remove_root(void)
+{
+ /* debugfs_remove() internally checks NULL and ERROR */
+ debugfs_remove(hsr_debugfs_root_dir);
+}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index b01e1bae4ddc..c7bd6c49fadf 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
skb->dev->dev_addr, skb->len) <= 0)
goto out;
skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
if (hsr_ver > 0) {
hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
@@ -368,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer);
- hsr_del_self_node(&hsr->self_node_db);
+ hsr_del_self_node(hsr);
hsr_del_nodes(&hsr->node_db);
}
@@ -440,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
INIT_LIST_HEAD(&hsr->ports);
INIT_LIST_HEAD(&hsr->node_db);
INIT_LIST_HEAD(&hsr->self_node_db);
+ spin_lock_init(&hsr->list_lock);
ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
/* Make sure we recognize frames from ourselves in hsr_rcv() */
- res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+ res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
slave[1]->dev_addr);
if (res < 0)
return res;
@@ -477,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
- goto err_add_port;
+ goto err_add_master;
res = register_netdevice(hsr_dev);
if (res)
- goto fail;
+ goto err_unregister;
res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
if (res)
- goto fail;
+ goto err_add_slaves;
+
res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
if (res)
- goto fail;
+ goto err_add_slaves;
+ hsr_debugfs_init(hsr, hsr_dev);
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
- res = hsr_debugfs_init(hsr, hsr_dev);
- if (res)
- goto fail;
return 0;
-fail:
+err_add_slaves:
+ unregister_netdevice(hsr_dev);
+err_unregister:
list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr_del_port(port);
-err_add_port:
- hsr_del_self_node(&hsr->self_node_db);
+err_add_master:
+ hsr_del_self_node(hsr);
return res;
}
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 292be446007b..27dc65d7de67 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
* frames from self that have been looped over the HSR ring.
*/
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
unsigned char addr_a[ETH_ALEN],
unsigned char addr_b[ETH_ALEN])
{
+ struct list_head *self_node_db = &hsr->self_node_db;
struct hsr_node *node, *oldnode;
node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
ether_addr_copy(node->macaddress_A, addr_a);
ether_addr_copy(node->macaddress_B, addr_b);
- rcu_read_lock();
+ spin_lock_bh(&hsr->list_lock);
oldnode = list_first_or_null_rcu(self_node_db,
struct hsr_node, mac_list);
if (oldnode) {
list_replace_rcu(&oldnode->mac_list, &node->mac_list);
- rcu_read_unlock();
- synchronize_rcu();
- kfree(oldnode);
+ spin_unlock_bh(&hsr->list_lock);
+ kfree_rcu(oldnode, rcu_head);
} else {
- rcu_read_unlock();
list_add_tail_rcu(&node->mac_list, self_node_db);
+ spin_unlock_bh(&hsr->list_lock);
}
return 0;
}
-void hsr_del_self_node(struct list_head *self_node_db)
+void hsr_del_self_node(struct hsr_priv *hsr)
{
+ struct list_head *self_node_db = &hsr->self_node_db;
struct hsr_node *node;
- rcu_read_lock();
+ spin_lock_bh(&hsr->list_lock);
node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
- rcu_read_unlock();
if (node) {
list_del_rcu(&node->mac_list);
- kfree(node);
+ kfree_rcu(node, rcu_head);
}
+ spin_unlock_bh(&hsr->list_lock);
}
void hsr_del_nodes(struct list_head *node_db)
@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
* seq_out is used to initialize filtering of outgoing duplicate frames
* originating from the newly added node.
*/
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
- u16 seq_out)
+static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ struct list_head *node_db,
+ unsigned char addr[],
+ u16 seq_out)
{
- struct hsr_node *node;
+ struct hsr_node *new_node, *node;
unsigned long now;
int i;
- node = kzalloc(sizeof(*node), GFP_ATOMIC);
- if (!node)
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
return NULL;
- ether_addr_copy(node->macaddress_A, addr);
+ ether_addr_copy(new_node->macaddress_A, addr);
/* We are only interested in time diffs here, so use current jiffies
* as initialization. (0 could trigger a spurious ring error warning).
*/
now = jiffies;
for (i = 0; i < HSR_PT_PORTS; i++)
- node->time_in[i] = now;
+ new_node->time_in[i] = now;
for (i = 0; i < HSR_PT_PORTS; i++)
- node->seq_out[i] = seq_out;
-
- list_add_tail_rcu(&node->mac_list, node_db);
+ new_node->seq_out[i] = seq_out;
+ spin_lock_bh(&hsr->list_lock);
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, addr))
+ goto out;
+ if (ether_addr_equal(node->macaddress_B, addr))
+ goto out;
+ }
+ list_add_tail_rcu(&new_node->mac_list, node_db);
+ spin_unlock_bh(&hsr->list_lock);
+ return new_node;
+out:
+ spin_unlock_bh(&hsr->list_lock);
+ kfree(new_node);
return node;
}
@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup)
{
struct list_head *node_db = &port->hsr->node_db;
+ struct hsr_priv *hsr = port->hsr;
struct hsr_node *node;
struct ethhdr *ethhdr;
u16 seq_out;
@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
seq_out = HSR_SEQNR_START;
}
- return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+ return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
}
/* Use the Supervision frame's info about an eventual macaddress_B for merging
@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
struct hsr_port *port_rcv)
{
- struct ethhdr *ethhdr;
- struct hsr_node *node_real;
+ struct hsr_priv *hsr = port_rcv->hsr;
struct hsr_sup_payload *hsr_sp;
+ struct hsr_node *node_real;
struct list_head *node_db;
+ struct ethhdr *ethhdr;
int i;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
if (!node_real)
/* No frame received from AddrA of this node yet */
- node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
+ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
HSR_SEQNR_START - 1);
if (!node_real)
goto done; /* No mem */
@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
}
node_real->addr_B_port = port_rcv->type;
+ spin_lock_bh(&hsr->list_lock);
list_del_rcu(&node_curr->mac_list);
+ spin_unlock_bh(&hsr->list_lock);
kfree_rcu(node_curr, rcu_head);
done:
@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t)
{
struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
struct hsr_node *node;
+ struct hsr_node *tmp;
struct hsr_port *port;
unsigned long timestamp;
unsigned long time_a, time_b;
- rcu_read_lock();
- list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
+ spin_lock_bh(&hsr->list_lock);
+ list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
* nor time_in[HSR_PT_SLAVE_B], will ever be updated for
* the master port. Thus the master node will be repeatedly
@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t)
kfree_rcu(node, rcu_head);
}
}
- rcu_read_unlock();
+ spin_unlock_bh(&hsr->list_lock);
/* Restart timer */
mod_timer(&hsr->prune_timer,
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 89a3ce38151d..0f0fa12b4329 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -12,10 +12,8 @@
struct hsr_node;
-void hsr_del_self_node(struct list_head *self_node_db);
+void hsr_del_self_node(struct hsr_priv *hsr);
void hsr_del_nodes(struct list_head *node_db);
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
- u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup);
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
void hsr_prune_nodes(struct timer_list *t);
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
unsigned char addr_a[ETH_ALEN],
unsigned char addr_b[ETH_ALEN]);
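
The hsr_framereg changes above move node-list writers from pure RCU onto a dedicated spinlock while readers keep traversing under RCU. A condensed sketch of the resulting pattern, reusing only names already present in the hunks, is:

/* Writers serialize on hsr->list_lock; readers stay lockless under RCU.
 * Deferred freeing via kfree_rcu() replaces the old synchronize_rcu() +
 * kfree() pair, so a writer never blocks waiting for readers.
 */
spin_lock_bh(&hsr->list_lock);
list_del_rcu(&node->mac_list);
spin_unlock_bh(&hsr->list_lock);
kfree_rcu(node, rcu_head);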
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index b9988a662ee1..9e389accbfc7 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
case NETDEV_CHANGE: /* Link (carrier) state changes */
hsr_check_carrier_and_operstate(hsr);
break;
+ case NETDEV_CHANGENAME:
+ if (is_hsr_master(dev))
+ hsr_debugfs_rename(dev);
+ break;
case NETDEV_CHANGEADDR:
if (port->type == HSR_PT_MASTER) {
/* This should not happen since there's no
@@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
/* Make sure we recognize frames from ourselves in hsr_rcv() */
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
- res = hsr_create_self_node(&hsr->self_node_db,
+ res = hsr_create_self_node(hsr,
master->dev->dev_addr,
port ?
port->dev->dev_addr :
@@ -123,6 +127,7 @@ static void __exit hsr_exit(void)
{
unregister_netdevice_notifier(&hsr_nb);
hsr_netlink_exit();
+ hsr_debugfs_remove_root();
}
module_init(hsr_init);
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 96fac696a1e1..d40de84a637f 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -160,8 +160,9 @@ struct hsr_priv {
int announce_count;
u16 sequence_nr;
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
- u8 prot_version; /* Indicate if HSRv0 or HSRv1. */
- spinlock_t seqnr_lock; /* locking for sequence_nr */
+ u8 prot_version; /* Indicate if HSRv0 or HSRv1. */
+ spinlock_t seqnr_lock; /* locking for sequence_nr */
+ spinlock_t list_lock; /* locking for node list */
unsigned char sup_multicast_addr[ETH_ALEN];
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
@@ -184,17 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+void hsr_debugfs_rename(struct net_device *dev);
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
void hsr_debugfs_term(struct hsr_priv *priv);
+void hsr_debugfs_create_root(void);
+void hsr_debugfs_remove_root(void);
#else
-static inline int hsr_debugfs_init(struct hsr_priv *priv,
- struct net_device *hsr_dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
{
- return 0;
}
-
+static inline void hsr_debugfs_init(struct hsr_priv *priv,
+ struct net_device *hsr_dev)
+{}
static inline void hsr_debugfs_term(struct hsr_priv *priv)
{}
+static inline void hsr_debugfs_create_root(void)
+{}
+static inline void hsr_debugfs_remove_root(void)
+{}
#endif
#endif /* __HSR_PRIVATE_H */
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 8f8337f893ba..8dc0547f01d0 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -476,6 +476,7 @@ int __init hsr_netlink_init(void)
if (rc)
goto fail_genl_register_family;
+ hsr_debugfs_create_root();
return 0;
fail_genl_register_family:
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index e4c6e8b40490..18c0d5bffe12 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
if (!dst)
goto out;
}
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = __sk_dst_check(sk, 0);
if (!dst)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 38c02bb62e2c..0fe2a5d3e258 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
if (skb_valid_dst(skb))
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9b153c7fcbb4..e90b600c7a25 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -214,7 +214,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 214154b47d56..069f72edb264 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -384,10 +384,11 @@ next: ;
return 1;
}
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
@@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+ unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
@@ -419,7 +421,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
@@ -512,7 +514,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+ struct xt_table_info *newinfo,
+ void *entry0,
const struct arpt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -569,7 +573,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, repl->name, repl->size,
+ ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
@@ -974,7 +978,7 @@ static int do_replace(struct net *net, const void __user *user,
goto free_newinfo;
}
- ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1149,7 +1153,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
}
}
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+ struct xt_table_info **pinfo,
void **pentry0,
const struct compat_arpt_replace *compatr)
{
@@ -1217,7 +1222,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
- ret = translate_table(newinfo, entry1, &repl);
+ ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
@@ -1270,7 +1275,7 @@ static int compat_do_replace(struct net *net, void __user *user,
goto free_newinfo;
}
- ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1546,7 +1551,7 @@ int arpt_register_table(struct net *net,
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(newinfo, loc_cpu_entry, repl);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f88c93c38f11..87e979f2b74a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -139,7 +139,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
@@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
@@ -2687,7 +2689,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 88b987ca9ebb..0238b554a1f0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1727,8 +1727,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
}
/* Ignore very old stuff early */
- if (!after(sp[used_sacks].end_seq, prior_snd_una))
+ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+ if (i == 0)
+ first_sack_index = -1;
continue;
+ }
used_sacks++;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1f7735ca8f22..58c92a7d671c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
__skb_unlink(skb, &sk->sk_write_queue);
tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
+ if (tp->highest_sack == NULL)
+ tp->highest_sack = skb;
+
tp->packets_out += tcp_skb_pcount(skb);
if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4da5758cc718..93a355b6b092 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1475,7 +1475,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
* queue contains some other skb
*/
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
- if (rmem > (size + sk->sk_rcvbuf))
+ if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
goto uncharge_drop;
spin_lock(&list->lock);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 35b84b52b702..9ebd54752e03 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
}
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index fe9cb8d1adca..e315526fa244 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
if (IS_ERR(dst))
return NULL;
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = inet6_csk_route_socket(sk, &fl6);
return IS_ERR(dst) ? NULL : dst;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 9d0965252ddf..ee968d980746 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
- dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
NEXTHDR_GRE);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 754a484d35df..2f376dbc37d5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
- skb_dst_update_pmtu(skb2, rel_info);
+ skb_dst_update_pmtu_no_confirm(skb2, rel_info);
}
icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1132,7 +1132,7 @@ route_lookup:
mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
IPV6_MIN_MTU : IPV4_MIN_MTU);
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 024db17386d2..6f08b760c2a7 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -479,7 +479,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b59940416cb5..affb51c11a25 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -95,7 +95,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
@@ -2692,7 +2694,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
- const struct ipv6hdr *iph, u32 mtu)
+ const struct ipv6hdr *iph, u32 mtu,
+ bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -2710,7 +2713,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
daddr = NULL;
saddr = NULL;
}
- dst_confirm_neigh(dst, daddr);
+
+ if (confirm_neigh)
+ dst_confirm_neigh(dst, daddr);
+
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
return;
@@ -2764,9 +2770,11 @@ out_unlock:
}
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
- __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+ __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
+ confirm_neigh);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -2785,7 +2793,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
- __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
+ __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
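
For orientation, the update_pmtu hunks in this and the neighbouring files all follow the same shape: the callback gains a confirm_neigh flag, socket-driven callers pass true, and tunnel transmit/error paths switch to the no-confirm helper. A condensed sketch, taken from the call sites shown in these hunks rather than any new API, is:

/* New callback signature used throughout these hunks. */
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
		    struct sk_buff *skb, u32 mtu, bool confirm_neigh);

/* Socket-originated PMTU updates still confirm the neighbour entry ... */
dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

/* ... while tunnel xmit/error paths use the no-confirm variant. */
skb_dst_update_pmtu_no_confirm(skb, mtu);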
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b2ccbc473127..98954830c40b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (tunnel->parms.iph.daddr)
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 699e0730ce8e..af7a4b8b1e9c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
}
static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 169e0a04f814..cf895bc80871 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1848,6 +1848,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
+ u32 lineno;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
@@ -1864,7 +1865,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
- ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
rcu_read_unlock_bh();
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index b1e300f8881b..b00866d777fe 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
struct rtable *ort = skb_rtable(skb);
if (!skb->dev && sk && sk_fullsock(sk))
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+ ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
}
static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index b6b14db3955b..b3f4a334f9d7 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
unsigned int *timeouts = data;
int i;
+ if (!timeouts)
+ timeouts = dn->dccp_timeout;
+
/* set default DCCP timeouts. */
for (i=0; i<CT_DCCP_MAX; i++)
timeouts[i] = dn->dccp_timeout[i];
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index fce3d93f1541..0399ae8f1188 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
+ if (!timeouts)
+ timeouts = sn->timeouts;
+
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
timeouts[i] = sn->timeouts[i];
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 9889d52eda82..e33a73cb1f42 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -134,11 +134,6 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
-{
- return (__s32)(timeout - (u32)jiffies);
-}
-
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
@@ -232,7 +227,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
int err;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index b9e7dd6e60ce..7ea2ddc2aa93 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -280,7 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
skb->tstamp = 0;
@@ -509,7 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
return NF_DROP;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
skb->tstamp = 0;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index de7a0d1e15c8..d06969af1085 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -88,7 +88,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
switch (tuple->l4proto) {
case IPPROTO_TCP:
key->tcp.flags = 0;
- mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
+ mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
break;
case IPPROTO_UDP:
@@ -166,24 +166,38 @@ static int flow_offload_eth_dst(struct net *net,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
- const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+ const struct dst_entry *dst_cache;
+ unsigned char ha[ETH_ALEN];
struct neighbour *n;
u32 mask, val;
+ u8 nud_state;
u16 val16;
- n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+ dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+ n = dst_neigh_lookup(dst_cache, daddr);
if (!n)
return -ENOENT;
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(ha, n->ha);
+ read_unlock_bh(&n->lock);
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_release(n);
+ return -ENOENT;
+ }
+
mask = ~0xffffffff;
- memcpy(&val, n->ha, 4);
+ memcpy(&val, ha, 4);
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
&val, &mask);
mask = ~0x0000ffff;
- memcpy(&val16, n->ha + 4, 2);
+ memcpy(&val16, ha + 4, 2);
val = val16;
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);
@@ -335,22 +349,26 @@ static void flow_offload_port_snat(struct net *net,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
- u32 mask = ~htonl(0xffff0000), port;
+ u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
break;
default:
return;
}
- port = htonl(port << 16);
+
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
@@ -361,22 +379,26 @@ static void flow_offload_port_dnat(struct net *net,
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
- u32 mask = ~htonl(0xffff), port;
+ u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
- port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
- offset = 0; /* offsetof(struct tcphdr, source); */
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
break;
case FLOW_OFFLOAD_DIR_REPLY:
- port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
- offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
break;
default:
return;
}
- port = htonl(port);
+
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
@@ -759,9 +781,9 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow)
{
struct flow_offload_work *offload;
- s64 delta;
+ __s32 delta;
- delta = flow->timeout - jiffies;
+ delta = nf_flow_timeout_delta(flow->timeout);
if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
flow->flags & FLOW_OFFLOAD_HW_DYING)
return;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 273f3838318b..43f05b3acd60 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5984,6 +5984,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
return ERR_PTR(-ENOENT);
}
+/* Only called from error and netdev event paths. */
static void nft_unregister_flowtable_hook(struct net *net,
struct nft_flowtable *flowtable,
struct nft_hook *hook)
@@ -5999,7 +6000,7 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list)
- nft_unregister_flowtable_hook(net, flowtable, hook);
+ nf_unregister_net_hook(net, &hook->ops);
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -6448,12 +6449,14 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
{
struct nft_hook *hook, *next;
+ flowtable->data.type->free(&flowtable->data);
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
kfree(hook);
}
kfree(flowtable->name);
- flowtable->data.type->free(&flowtable->data);
module_put(flowtable->data.type->owner);
kfree(flowtable);
}
@@ -6497,6 +6500,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
if (hook->ops.dev != dev)
continue;
+ /* flow_offload_netdev_event() cleans up entries for us. */
nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index dd82ff2ee19f..b70b48996801 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -200,9 +200,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
- struct nft_flow_offload *priv = nft_expr_priv(expr);
-
- priv->flowtable->use--;
nf_ct_netns_put(ctx->net, ctx->family);
}
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index 4c33dfc9dab5..d67f83a0958d 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
if (priv->sreg_port)
- tport = regs->data[priv->sreg_port];
+ tport = nft_reg_load16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
@@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
if (priv->sreg_port)
- tport = regs->data[priv->sreg_port];
+ tport = nft_reg_load16(&regs->data[priv->sreg_port]);
if (!tport)
tport = hp->dest;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 88f98f27ad88..3d24d45be5f4 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = 0;
- skb_put_padto(skb, ALIGN(len, 4));
+ skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
mutex_lock(&node->ep_lock);
if (node->ep)
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 3fd5f40189bd..a792d8a3872a 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -156,6 +156,13 @@ static void rds_ib_add_one(struct ib_device *device)
has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
device->ops.map_phys_fmr && device->ops.unmap_fmr);
rds_ibdev->use_fastreg = (has_fr && !has_fmr);
+ rds_ibdev->odp_capable =
+ !!(device->attrs.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING) &&
+ !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_WRITE) &&
+ !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_READ);
rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 6e6f24753998..0296f1f7acda 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -247,7 +247,8 @@ struct rds_ib_device {
struct ib_device *dev;
struct ib_pd *pd;
struct dma_pool *rid_hdrs_pool; /* RDS headers DMA pool */
- bool use_fastreg;
+ u8 use_fastreg:1;
+ u8 odp_capable:1;
unsigned int max_mrs;
struct rds_ib_mr_pool *mr_1m_pool;
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 9045a8c0edff..0c8252d7fe2b 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -67,6 +67,7 @@ struct rds_ib_frmr {
/* This is stored as mr->r_trans_private. */
struct rds_ib_mr {
+ struct delayed_work work;
struct rds_ib_device *device;
struct rds_ib_mr_pool *pool;
struct rds_ib_connection *ic;
@@ -81,9 +82,11 @@ struct rds_ib_mr {
unsigned int sg_len;
int sg_dma_len;
+ u8 odp:1;
union {
struct rds_ib_fmr fmr;
struct rds_ib_frmr frmr;
+ struct ib_mr *mr;
} u;
};
@@ -122,12 +125,14 @@ void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_sock *rs, u32 *key_ret,
- struct rds_connection *conn);
+ struct rds_connection *conn, u64 start, u64 length,
+ int need_odp);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_mr_init(void);
void rds_ib_mr_exit(void);
+u32 rds_ib_get_lkey(void *trans_private);
void __rds_ib_teardown_mr(struct rds_ib_mr *);
void rds_ib_teardown_mr(struct rds_ib_mr *);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index c8c1e3ae8d84..b34b24e237f8 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -37,8 +37,15 @@
#include "rds_single_path.h"
#include "ib_mr.h"
+#include "rds.h"
struct workqueue_struct *rds_ib_mr_wq;
+struct rds_ib_dereg_odp_mr {
+ struct work_struct work;
+ struct ib_mr *mr;
+};
+
+static void rds_ib_odp_mr_worker(struct work_struct *work);
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
@@ -213,6 +220,9 @@ void rds_ib_sync_mr(void *trans_private, int direction)
struct rds_ib_mr *ibmr = trans_private;
struct rds_ib_device *rds_ibdev = ibmr->device;
+ if (ibmr->odp)
+ return;
+
switch (direction) {
case DMA_FROM_DEVICE:
ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
@@ -482,6 +492,16 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
+ if (ibmr->odp) {
+ /* An MR created and marked as use_once. We use delayed work,
+ * because there is a chance that we are in an interrupt and
+ * can't call ib_dereg_mr() directly.
+ */
+ INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
+ queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
+ return;
+ }
+
/* Return it to the pool's free list */
if (rds_ibdev->use_fastreg)
rds_ib_free_frmr_list(ibmr);
@@ -526,9 +546,17 @@ void rds_ib_flush_mrs(void)
up_read(&rds_ib_devices_lock);
}
+u32 rds_ib_get_lkey(void *trans_private)
+{
+ struct rds_ib_mr *ibmr = trans_private;
+
+ return ibmr->u.mr->lkey;
+}
+
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_sock *rs, u32 *key_ret,
- struct rds_connection *conn)
+ struct rds_connection *conn,
+ u64 start, u64 length, int need_odp)
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_mr *ibmr = NULL;
@@ -541,6 +569,51 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
goto out;
}
+ if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
+ u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
+ int access_flags =
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_ON_DEMAND);
+ struct ib_sge sge = {};
+ struct ib_mr *ib_mr;
+
+ if (!rds_ibdev->odp_capable) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
+ access_flags);
+
+ if (IS_ERR(ib_mr)) {
+ rdsdebug("rds_ib_get_user_mr returned %d\n",
+ IS_ERR(ib_mr));
+ ret = PTR_ERR(ib_mr);
+ goto out;
+ }
+ if (key_ret)
+ *key_ret = ib_mr->rkey;
+
+ ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+ if (!ibmr) {
+ ib_dereg_mr(ib_mr);
+ ret = -ENOMEM;
+ goto out;
+ }
+ ibmr->u.mr = ib_mr;
+ ibmr->odp = 1;
+
+ sge.addr = virt_addr;
+ sge.length = length;
+ sge.lkey = ib_mr->lkey;
+
+ ib_advise_mr(rds_ibdev->pd,
+ IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+ IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
+ return ibmr;
+ }
+
if (conn)
ic = conn->c_transport_data;
@@ -629,3 +702,12 @@ void rds_ib_mr_exit(void)
{
destroy_workqueue(rds_ib_mr_wq);
}
+
+static void rds_ib_odp_mr_worker(struct work_struct *work)
+{
+ struct rds_ib_mr *ibmr;
+
+ ibmr = container_of(work, struct rds_ib_mr, work.work);
+ ib_dereg_mr(ibmr->u.mr);
+ kfree(ibmr);
+}
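
The ODP branch added to rds_ib_get_mr() above is the in-kernel user of ib_reg_user_mr()/ib_advise_mr() that this series introduces. A minimal sketch of that flow, assuming <rdma/ib_verbs.h> and a device whose odp_caps were checked as in rds_ib_add_one() (the helper name odp_reg_and_prefetch() is illustrative, not from the patch):

/* Sketch only: register a userspace range as an ODP MR from a kernel ULP
 * and prefetch it, mirroring the RDS code above.
 */
static struct ib_mr *odp_reg_and_prefetch(struct ib_pd *pd, u64 start, u64 length)
{
	int access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE | IB_ACCESS_ON_DEMAND;
	struct ib_sge sge = {};
	struct ib_mr *mr;

	mr = ib_reg_user_mr(pd, start, length, start, access);
	if (IS_ERR(mr))
		return mr;

	sge.addr = start;
	sge.length = length;
	sge.lkey = mr->lkey;

	/* Fault the working set in ahead of time; errors here are non-fatal. */
	ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
		     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
	return mr;
}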
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index d1cc1d7778d8..dfe778220657 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -39,6 +39,7 @@
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
+#include "ib_mr.h"
/*
* Convert IB-specific error message to RDS error message and call core
@@ -635,6 +636,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
send->s_sge[0].length = sizeof(struct rds_header);
+ send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
sizeof(struct rds_header));
@@ -650,6 +652,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
send->s_sge[1].addr = sg_dma_address(scat);
send->s_sge[1].addr += rm->data.op_dmaoff;
send->s_sge[1].length = len;
+ send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
bytes_sent += len;
rm->data.op_dmaoff += len;
@@ -858,20 +861,29 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
int ret;
int num_sge;
int nr_sig = 0;
+ u64 odp_addr = op->op_odp_addr;
+ u32 odp_lkey = 0;
/* map the op the first time we see it */
- if (!op->op_mapped) {
- op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents, (op->op_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
- if (op->op_count == 0) {
- rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
- ret = -ENOMEM; /* XXX ? */
- goto out;
+ if (!op->op_odp_mr) {
+ if (!op->op_mapped) {
+ op->op_count =
+ ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
+ op->op_nents,
+ (op->op_write) ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping op %p: %d\n", ic, op,
+ op->op_count);
+ if (op->op_count == 0) {
+ rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
+ ret = -ENOMEM; /* XXX ? */
+ goto out;
+ }
+ op->op_mapped = 1;
}
-
- op->op_mapped = 1;
+ } else {
+ op->op_count = op->op_nents;
+ odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
}
/*
@@ -923,14 +935,20 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
scat != &op->op_sg[op->op_count]; j++) {
len = sg_dma_len(scat);
- send->s_sge[j].addr = sg_dma_address(scat);
+ if (!op->op_odp_mr) {
+ send->s_sge[j].addr = sg_dma_address(scat);
+ send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
+ } else {
+ send->s_sge[j].addr = odp_addr;
+ send->s_sge[j].lkey = odp_lkey;
+ }
send->s_sge[j].length = len;
- send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
sent += len;
rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
remote_addr += len;
+ odp_addr += len;
scat++;
}
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 916f5ec373d8..3341eee87bf9 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -156,11 +156,13 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
struct page **pages, int write)
{
+ unsigned int gup_flags = FOLL_LONGTERM;
int ret;
- ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
- pages);
+ if (write)
+ gup_flags |= FOLL_WRITE;
+ ret = get_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
if (ret >= 0 && ret < nr_pages) {
while (ret--)
put_page(pages[ret]);
@@ -175,13 +177,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
struct rds_conn_path *cp)
{
struct rds_mr *mr = NULL, *found;
+ struct scatterlist *sg = NULL;
unsigned int nr_pages;
struct page **pages = NULL;
- struct scatterlist *sg;
void *trans_private;
unsigned long flags;
rds_rdma_cookie_t cookie;
- unsigned int nents;
+ unsigned int nents = 0;
+ int need_odp = 0;
long i;
int ret;
@@ -195,6 +198,21 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
}
+ /* If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
+ PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
+ (args->vec.addr + args->vec.bytes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!can_do_mlock()) {
+ ret = -EPERM;
+ goto out;
+ }
+
nr_pages = rds_pages_in_vec(&args->vec);
if (nr_pages == 0) {
ret = -EINVAL;
@@ -248,36 +266,44 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* the zero page.
*/
ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
- if (ret < 0)
- goto out;
-
- nents = ret;
- sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
- if (!sg) {
- ret = -ENOMEM;
+ if (ret == -EOPNOTSUPP) {
+ need_odp = 1;
+ } else if (ret <= 0) {
goto out;
- }
- WARN_ON(!nents);
- sg_init_table(sg, nents);
-
- /* Stick all pages into the scatterlist */
- for (i = 0 ; i < nents; i++)
- sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
+ } else {
+ nents = ret;
+ sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ WARN_ON(!nents);
+ sg_init_table(sg, nents);
- rdsdebug("RDS: trans_private nents is %u\n", nents);
+ /* Stick all pages into the scatterlist */
+ for (i = 0 ; i < nents; i++)
+ sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
+ rdsdebug("RDS: trans_private nents is %u\n", nents);
+ }
/* Obtain a transport specific MR. If this succeeds, the
* s/g list is now owned by the MR.
* Note that dma_map() implies that pending writes are
* flushed to RAM, so no dma_sync is needed here. */
- trans_private = rs->rs_transport->get_mr(sg, nents, rs,
- &mr->r_key,
- cp ? cp->cp_conn : NULL);
+ trans_private = rs->rs_transport->get_mr(
+ sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
+ args->vec.addr, args->vec.bytes,
+ need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);
if (IS_ERR(trans_private)) {
- for (i = 0 ; i < nents; i++)
- put_page(sg_page(&sg[i]));
- kfree(sg);
+ /* In the ODP case we didn't GUP the pages, so there is
+ * nothing to release.
+ */
+ if (!need_odp) {
+ for (i = 0 ; i < nents; i++)
+ put_page(sg_page(&sg[i]));
+ kfree(sg);
+ }
ret = PTR_ERR(trans_private);
goto out;
}
@@ -291,7 +317,11 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* map page aligned regions. So we keep the offset, and build
* a 64bit cookie containing <R_Key, offset> and pass that
* around. */
- cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
+ if (need_odp)
+ cookie = rds_rdma_make_cookie(mr->r_key, 0);
+ else
+ cookie = rds_rdma_make_cookie(mr->r_key,
+ args->vec.addr & ~PAGE_MASK);
if (cookie_ret)
*cookie_ret = cookie;
@@ -456,22 +486,26 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
- for (i = 0; i < ro->op_nents; i++) {
- struct page *page = sg_page(&ro->op_sg[i]);
-
- /* Mark page dirty if it was possibly modified, which
- * is the case for a RDMA_READ which copies from remote
- * to local memory */
- if (!ro->op_write) {
- WARN_ON(!page->mapping && irqs_disabled());
- set_page_dirty(page);
+ if (ro->op_odp_mr) {
+ rds_mr_put(ro->op_odp_mr);
+ } else {
+ for (i = 0; i < ro->op_nents; i++) {
+ struct page *page = sg_page(&ro->op_sg[i]);
+
+ /* Mark page dirty if it was possibly modified, which
+ * is the case for a RDMA_READ which copies from remote
+ * to local memory
+ */
+ if (!ro->op_write)
+ set_page_dirty(page);
+ put_page(page);
}
- put_page(page);
}
kfree(ro->op_notifier);
ro->op_notifier = NULL;
ro->op_active = 0;
+ ro->op_odp_mr = NULL;
}
void rds_atomic_free_op(struct rm_atomic_op *ao)
@@ -581,6 +615,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
struct rds_iovec *iovs;
unsigned int i, j;
int ret = 0;
+ bool odp_supported = true;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
|| rm->rdma.op_active)
@@ -602,6 +637,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
ret = -EINVAL;
goto out_ret;
}
+ /* odp-mr is not supported for multiple requests within one message */
+ if (args->nr_local != 1)
+ odp_supported = false;
iovs = vec->iov;
@@ -623,6 +661,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
+ op->op_odp_mr = NULL;
+
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
if (!op->op_sg)
@@ -672,10 +712,44 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
* If it's a READ operation, we need to pin the pages for writing.
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
- if (ret < 0)
+ if ((!odp_supported && ret <= 0) ||
+ (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
goto out_pages;
- else
- ret = 0;
+
+ if (ret == -EOPNOTSUPP) {
+ struct rds_mr *local_odp_mr;
+
+ if (!rs->rs_transport->get_mr) {
+ ret = -EOPNOTSUPP;
+ goto out_pages;
+ }
+ local_odp_mr =
+ kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
+ if (!local_odp_mr) {
+ ret = -ENOMEM;
+ goto out_pages;
+ }
+ RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
+ refcount_set(&local_odp_mr->r_refcount, 1);
+ local_odp_mr->r_trans = rs->rs_transport;
+ local_odp_mr->r_sock = rs;
+ local_odp_mr->r_trans_private =
+ rs->rs_transport->get_mr(
+ NULL, 0, rs, &local_odp_mr->r_key, NULL,
+ iov->addr, iov->bytes, ODP_VIRTUAL);
+ if (IS_ERR(local_odp_mr->r_trans_private)) {
+ ret = IS_ERR(local_odp_mr->r_trans_private);
+ rdsdebug("get_mr ret %d %p\"", ret,
+ local_odp_mr->r_trans_private);
+ kfree(local_odp_mr);
+ ret = -EOPNOTSUPP;
+ goto out_pages;
+ }
+ rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
+ local_odp_mr, local_odp_mr->r_trans_private);
+ op->op_odp_mr = local_odp_mr;
+ op->op_odp_addr = iov->addr;
+ }
rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
nr_bytes, nr, iov->bytes, iov->addr);
@@ -691,6 +765,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
+ sg_dma_len(sg) = sg->length;
rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
sg->offset, sg->length, iov->addr, iov->bytes);
@@ -709,6 +784,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
goto out_pages;
}
op->op_bytes = nr_bytes;
+ ret = 0;
out_pages:
kfree(pages);
@@ -755,7 +831,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {
- mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
+ mr->r_trans->sync_mr(mr->r_trans_private,
+ DMA_TO_DEVICE);
rm->rdma.op_rdma_mr = mr;
}
return err;
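
For context, a minimal sketch of the calling convention the rdma.c hunks above establish: page pinning is attempted first, and -EOPNOTSUPP (as returned for memory that cannot be pinned, e.g. FS-DAX backed mappings) selects an ODP registration instead of failing the mapping outright. The helper name below is invented for illustration and is not part of the patch.

#include <linux/mm.h>
#include <linux/errno.h>

static int pin_or_flag_odp(unsigned long addr, unsigned int nr_pages,
			   struct page **pages, bool write, bool *need_odp)
{
	unsigned int gup_flags = write ? FOLL_WRITE : 0;
	int ret;

	*need_odp = false;
	ret = get_user_pages_fast(addr, nr_pages, gup_flags, pages);
	if (ret == -EOPNOTSUPP) {
		*need_odp = true;	/* fall back to on-demand paging */
		return 0;
	}
	if (ret >= 0 && ret < (int)nr_pages) {
		while (ret--)		/* partial pin: release what was pinned */
			put_page(pages[ret]);
		return -EFAULT;
	}
	return ret < 0 ? ret : 0;
}
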
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 53e86911773a..e4a603523083 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -40,7 +40,6 @@
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
-
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
@@ -478,6 +477,9 @@ struct rds_message {
struct rds_notifier *op_notifier;
struct rds_mr *op_rdma_mr;
+
+ u64 op_odp_addr;
+ struct rds_mr *op_odp_mr;
} rdma;
struct rm_data_op {
unsigned int op_active:1;
@@ -573,7 +575,8 @@ struct rds_transport {
void (*exit)(void);
void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
struct rds_sock *rs, u32 *key_ret,
- struct rds_connection *conn);
+ struct rds_connection *conn,
+ u64 start, u64 length, int need_odp);
void (*sync_mr)(void *trans_private, int direction);
void (*free_mr)(void *trans_private, int invalidate);
void (*flush_mrs)(void);
@@ -956,6 +959,12 @@ static inline bool rds_destroy_pending(struct rds_connection *conn)
(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}
+enum {
+ ODP_NOT_NEEDED,
+ ODP_ZEROBASED,
+ ODP_VIRTUAL
+};
+
/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do { \
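
A hypothetical transport-side sketch of how a ->get_mr() implementation might interpret the extended signature declared above (all names here are invented; this is not the mlx5/rds_ib code). With ODP_ZEROBASED the MR is addressed from offset 0, matching the zero-offset cookie built by __rds_rdma_map(), while ODP_VIRTUAL registers the caller's virtual range [start, start+length).

#include <linux/err.h>
#include <linux/scatterlist.h>
#include "rds.h"

static void *example_reg_pinned(struct scatterlist *sg, unsigned long nr_sg,
				u32 *key_ret)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub: classic pinned-pages MR */
}

static void *example_reg_odp(u64 start, u64 length, u32 *key_ret)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub: ODP-capable MR */
}

static void *example_get_mr(struct scatterlist *sg, unsigned long nr_sg,
			    struct rds_sock *rs, u32 *key_ret,
			    struct rds_connection *conn,
			    u64 start, u64 length, int need_odp)
{
	switch (need_odp) {
	case ODP_NOT_NEEDED:
		return example_reg_pinned(sg, nr_sg, key_ret);
	case ODP_ZEROBASED:
		return example_reg_odp(0, length, key_ret);
	case ODP_VIRTUAL:
		return example_reg_odp(start, length, key_ret);
	default:
		return ERR_PTR(-EINVAL);
	}
}
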
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7c7d10f2e0c1..5e99df80e80a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -209,6 +209,7 @@ struct rxrpc_skb_priv {
struct rxrpc_security {
const char *name; /* name of this service */
u8 security_index; /* security type provided */
+ u32 no_key_abort; /* Abort code indicating no key */
/* Initialise a security service */
int (*init)(void);
@@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
-void rxrpc_new_incoming_connection(struct rxrpc_sock *,
- struct rxrpc_connection *, struct sk_buff *);
+void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
+ const struct rxrpc_security *, struct key *,
+ struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
@@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad;
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
+ const struct rxrpc_security **, struct key **,
+ struct sk_buff *);
/*
* sendmsg.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 135bf5cd8dd5..70e44abf106c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -240,6 +240,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
}
/*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ ktime_t now = skb->tstamp;
+
+ if (call->peer->rtt_usage < 3 ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ true, true,
+ rxrpc_propose_ack_ping_for_params);
+}
+
+/*
* Allocate a new incoming call from the prealloc pool, along with a connection
* and a peer as necessary.
*/
@@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_local *local,
struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
+ const struct rxrpc_security *sec,
+ struct key *key,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
@@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
conn->params.local = rxrpc_get_local(local);
conn->params.peer = peer;
rxrpc_see_connection(conn);
- rxrpc_new_incoming_connection(rx, conn, skb);
+ rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
} else {
rxrpc_get_connection(conn);
}
@@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ const struct rxrpc_security *sec = NULL;
struct rxrpc_connection *conn;
struct rxrpc_peer *peer = NULL;
- struct rxrpc_call *call;
+ struct rxrpc_call *call = NULL;
+ struct key *key = NULL;
_enter("");
@@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
- _leave(" = NULL [close]");
- call = NULL;
- goto out;
+ goto no_call;
}
/* The peer, connection and call may all have sprung into existence due
@@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
*/
conn = rxrpc_find_connection_rcu(local, skb, &peer);
- call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+ if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
+ goto no_call;
+
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
+ key_put(key);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
- _leave(" = NULL [busy]");
- call = NULL;
- goto out;
+ goto no_call;
}
trace_rxrpc_receive(call, rxrpc_receive_incoming,
sp->hdr.serial, sp->hdr.seq);
- /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
- * sendmsg()/recvmsg() inconveniently stealing the mutex once the
- * notification is generated.
- *
- * The BUG should never happen because the kernel should be well
- * behaved enough not to access the call before the first notification
- * event and userspace is prevented from doing so until the state is
- * appropriate.
- */
- if (!mutex_trylock(&call->user_mutex))
- BUG();
-
/* Make the call live. */
rxrpc_incoming_call(rx, call, skb);
conn = call->conn;
@@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
BUG();
}
spin_unlock(&conn->state_lock);
+ spin_unlock(&rx->incoming_lock);
+
+ rxrpc_send_ping(call, skb);
if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
rxrpc_notify_socket(call);
@@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %p{%d}", call, call->debug_id);
-out:
- spin_unlock(&rx->incoming_lock);
return call;
+
+no_call:
+ spin_unlock(&rx->incoming_lock);
+ _leave(" = NULL [%u]", skb->mark);
+ return NULL;
}
/*
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index a1ceef4f5cd0..808a4723f868 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
_enter("{%d}", conn->debug_id);
ASSERT(conn->security_ix != 0);
-
- if (!conn->params.key) {
- _debug("set up security");
- ret = rxrpc_init_server_conn_security(conn);
- switch (ret) {
- case 0:
- break;
- case -ENOENT:
- abort_code = RX_CALL_DEAD;
- goto abort;
- default:
- abort_code = RXKADNOAUTH;
- goto abort;
- }
- }
+ ASSERT(conn->server_key);
if (conn->security->issue_challenge(conn) < 0) {
abort_code = RX_CALL_DEAD;
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 123d6ceab15c..21da48e3d2e5 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
*/
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
struct rxrpc_connection *conn,
+ const struct rxrpc_security *sec,
+ struct key *key,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
conn->service_id = sp->hdr.serviceId;
conn->security_ix = sp->hdr.securityIndex;
conn->out_clientflag = 0;
+ conn->security = sec;
+ conn->server_key = key_get(key);
if (conn->security_ix)
conn->state = RXRPC_CONN_SERVICE_UNSECURED;
else
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 157be1ff8697..86bd133b4fa0 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -193,22 +193,6 @@ send_extra_data:
}
/*
- * Ping the other end to fill our RTT cache and to retrieve the rwind
- * and MTU parameters.
- */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- ktime_t now = skb->tstamp;
-
- if (call->peer->rtt_usage < 3 ||
- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
- true, true,
- rxrpc_propose_ack_ping_for_params);
-}
-
-/*
* Apply a hard ACK by advancing the Tx window.
*/
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
call = rxrpc_new_incoming_call(local, rx, skb);
if (!call)
goto reject_packet;
- rxrpc_send_ping(call, skb);
- mutex_unlock(&call->user_mutex);
}
/* Process a call packet; this either discards or passes on the ref
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 8d8aa3c230b5..098f1f9ec53b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
u32 serial;
int ret;
- _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
+ _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
- ret = key_validate(conn->params.key);
+ ret = key_validate(conn->server_key);
if (ret < 0)
return ret;
@@ -1293,6 +1293,7 @@ static void rxkad_exit(void)
const struct rxrpc_security rxkad = {
.name = "rxkad",
.security_index = RXRPC_SECURITY_RXKAD,
+ .no_key_abort = RXKADUNKNOWNKEY,
.init = rxkad_init,
.exit = rxkad_exit,
.init_connection_security = rxkad_init_connection_security,
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index a4c47d2b7054..9b1fb9ed0717 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
}
/*
- * initialise the security on a server connection
+ * Find the security key for a server connection.
*/
-int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
+bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
+ const struct rxrpc_security **_sec,
+ struct key **_key,
+ struct sk_buff *skb)
{
const struct rxrpc_security *sec;
- struct rxrpc_local *local = conn->params.local;
- struct rxrpc_sock *rx;
- struct key *key;
- key_ref_t kref;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ key_ref_t kref = NULL;
char kdesc[5 + 1 + 3 + 1];
_enter("");
- sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
+ sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
- sec = rxrpc_security_lookup(conn->security_ix);
+ sec = rxrpc_security_lookup(sp->hdr.securityIndex);
if (!sec) {
- _leave(" = -ENOKEY [lookup]");
- return -ENOKEY;
+ trace_rxrpc_abort(0, "SVS",
+ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EKEYREJECTED);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ return false;
}
- /* find the service */
- read_lock(&local->services_lock);
- rx = rcu_dereference_protected(local->service,
- lockdep_is_held(&local->services_lock));
- if (rx && (rx->srx.srx_service == conn->service_id ||
- rx->second_service == conn->service_id))
- goto found_service;
+ if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
+ goto out;
- /* the service appears to have died */
- read_unlock(&local->services_lock);
- _leave(" = -ENOENT");
- return -ENOENT;
-
-found_service:
if (!rx->securities) {
- read_unlock(&local->services_lock);
- _leave(" = -ENOKEY");
- return -ENOKEY;
+ trace_rxrpc_abort(0, "SVR",
+ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EKEYREJECTED);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ return false;
}
/* look through the service's keyring */
kref = keyring_search(make_key_ref(rx->securities, 1UL),
&key_type_rxrpc_s, kdesc, true);
if (IS_ERR(kref)) {
- read_unlock(&local->services_lock);
- _leave(" = %ld [search]", PTR_ERR(kref));
- return PTR_ERR(kref);
+ trace_rxrpc_abort(0, "SVK",
+ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ sec->no_key_abort, EKEYREJECTED);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = sec->no_key_abort;
+ return false;
}
- key = key_ref_to_ptr(kref);
- read_unlock(&local->services_lock);
-
- conn->server_key = key;
- conn->security = sec;
-
- _leave(" = 0");
- return 0;
+out:
+ *_sec = sec;
+ *_key = key_ref_to_ptr(kref);
+ return true;
}
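
A small note on the keyring lookup above: the description searched in the service's keyring is "<serviceId>:<securityIndex>", and the on-stack buffer is sized for the worst case (5 digits for a 16-bit service id, the ':', 3 digits for an 8-bit security index, plus the NUL). A stand-alone userspace sketch of the same formatting, with the helper name made up here:

#include <stdio.h>
#include <stdint.h>

static void build_rxrpc_key_desc(char kdesc[5 + 1 + 3 + 1],
				 uint16_t service_id, uint8_t security_ix)
{
	/* worst case: "65535:255" plus the terminating NUL */
	snprintf(kdesc, 5 + 1 + 3 + 1, "%u:%u",
		 (unsigned int)service_id, (unsigned int)security_ix);
}
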
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1e3eb3a97532..1ad300e6dbc0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
bool use_reinsert;
bool want_ingress;
bool is_redirect;
+ bool expects_nh;
int m_eaction;
int mac_len;
+ bool at_nh;
rec_level = __this_cpu_inc_return(mirred_rec_level);
if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
@@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
goto out;
}
- /* If action's target direction differs than filter's direction,
- * and devices expect a mac header on xmit, then mac push/pull is
- * needed.
- */
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
- if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
- if (!skb_at_tc_ingress(skb)) {
- /* caught at egress, act ingress: pull mac */
- mac_len = skb_network_header(skb) - skb_mac_header(skb);
+
+ expects_nh = want_ingress || !m_mac_header_xmit;
+ at_nh = skb->data == skb_network_header(skb);
+ if (at_nh != expects_nh) {
+ mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
+ skb_network_header(skb) - skb_mac_header(skb);
+ if (expects_nh) {
+ /* target device/action expect data at nh */
skb_pull_rcsum(skb2, mac_len);
} else {
- /* caught at ingress, act egress: push mac */
- skb_push_rcsum(skb2, skb->mac_len);
+ /* target device/action expect data at mac */
+ skb_push_rcsum(skb2, mac_len);
}
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 6a0eacafdb19..76e0d122616a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
tcf_proto_destroy(tp, rtnl_held, true, extack);
}
-static int walker_check_empty(struct tcf_proto *tp, void *fh,
- struct tcf_walker *arg)
+static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
- if (fh) {
- arg->nonempty = true;
- return -1;
- }
- return 0;
-}
-
-static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
-{
- struct tcf_walker walker = { .fn = walker_check_empty, };
-
- if (tp->ops->walk) {
- tp->ops->walk(tp, &walker, rtnl_held);
- return !walker.nonempty;
- }
- return true;
-}
+ if (tp->ops->delete_empty)
+ return tp->ops->delete_empty(tp);
-static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
-{
- spin_lock(&tp->lock);
- if (tcf_proto_is_empty(tp, rtnl_held))
- tp->deleting = true;
- spin_unlock(&tp->lock);
+ tp->deleting = true;
return tp->deleting;
}
@@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
* concurrently.
* Mark tp for deletion if it is empty.
*/
- if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
+ if (!tp_iter || !tcf_proto_check_delete(tp)) {
mutex_unlock(&chain->filter_chain_lock);
return;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0d125de54285..b0f42e62dd76 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2773,6 +2773,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
f->res.class = cl;
}
+static bool fl_delete_empty(struct tcf_proto *tp)
+{
+ struct cls_fl_head *head = fl_head_dereference(tp);
+
+ spin_lock(&tp->lock);
+ tp->deleting = idr_is_empty(&head->handle_idr);
+ spin_unlock(&tp->lock);
+
+ return tp->deleting;
+}
+
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.kind = "flower",
.classify = fl_classify,
@@ -2782,6 +2793,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.put = fl_put,
.change = fl_change,
.delete = fl_delete,
+ .delete_empty = fl_delete_empty,
.walk = fl_walk,
.reoffload = fl_reoffload,
.hw_add = fl_hw_add,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 66c6bcec16cb..a0e6fac613de 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1108,33 +1108,10 @@ erridr:
return err;
}
-static bool u32_hnode_empty(struct tc_u_hnode *ht, bool *non_root_ht)
-{
- int i;
-
- if (!ht)
- return true;
- if (!ht->is_root) {
- *non_root_ht = true;
- return false;
- }
- if (*non_root_ht)
- return false;
- if (ht->refcnt < 2)
- return true;
-
- for (i = 0; i <= ht->divisor; i++) {
- if (rtnl_dereference(ht->ht[i]))
- return false;
- }
- return true;
-}
-
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
struct tc_u_common *tp_c = tp->data;
- bool non_root_ht = false;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
unsigned int h;
@@ -1147,8 +1124,6 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
- if (u32_hnode_empty(ht, &non_root_ht))
- return;
if (arg->count >= arg->skip) {
if (arg->fn(tp, ht, arg) < 0) {
arg->stop = 1;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index e0f40400f679..2277369feae5 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1769,7 +1769,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->avg_window_begin));
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
- do_div(b, window_interval);
+ b = div64_u64(b, window_interval);
q->avg_peak_bandwidth =
cake_ewma(q->avg_peak_bandwidth, b,
b > q->avg_peak_bandwidth ? 2 : 8);
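
The sch_cake change above swaps do_div() for div64_u64() because do_div() treats its divisor as a 32-bit value: a window_interval longer than roughly 4.3 seconds (2^32 ns) would be silently truncated before the division. A minimal sketch of the corrected arithmetic, assuming only <linux/math64.h>:

#include <linux/math64.h>
#include <linux/time64.h>
#include <linux/types.h>

static u64 avg_bytes_per_sec(u64 window_bytes, u64 window_interval_ns)
{
	u64 b = window_bytes * (u64)NSEC_PER_SEC;

	/* the divisor may exceed 32 bits; div64_u64() handles that */
	return div64_u64(b, window_interval_ns);
}
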
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b1c7e726ce5d..a5a295477ecc 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ if (q->rate_enable)
+ smp_store_release(&sk->sk_pacing_status,
+ SK_PACING_FQ);
if (fq_flow_is_throttled(f))
fq_flow_unset_throttled(q, f);
f->time_next_packet = 0ULL;
@@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
fq_flow_set_detached(f);
f->sk = sk;
- if (skb->sk == sk)
+ if (skb->sk == sk) {
f->socket_hash = sk->sk_hash;
+ if (q->rate_enable)
+ smp_store_release(&sk->sk_pacing_status,
+ SK_PACING_FQ);
+ }
f->credit = q->initial_quantum;
rb_link_node(&f->fq_node, parent, p);
@@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
f->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) {
- struct sock *sk = skb->sk;
-
fq_flow_add_tail(&q->new_flows, f);
if (time_after(jiffies, f->age + q->flow_refill_delay))
f->credit = max_t(u32, f->credit, q->quantum);
- if (sk && q->rate_enable) {
- if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
- SK_PACING_FQ))
- smp_store_release(&sk->sk_pacing_status,
- SK_PACING_FQ);
- }
q->inactive_flows--;
}
@@ -787,10 +786,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
- if (quantum > 0)
+ if (quantum > 0 && quantum <= (1 << 20)) {
q->quantum = quantum;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
err = -EINVAL;
+ }
}
if (tb[TCA_FQ_INITIAL_QUANTUM])
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 18b884cfdfe8..647941702f9f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -292,8 +292,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct tc_prio_qopt_offload graft_offload;
unsigned long band = arg - 1;
- if (new == NULL)
- new = &noop_qdisc;
+ if (!new) {
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, arg), extack);
+ if (!new)
+ new = &noop_qdisc;
+ else
+ qdisc_hash_add(new, true);
+ }
*old = qdisc_replace(sch, new, &q->queues[band]);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index acd737d4c0e0..834e9f82afed 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1363,8 +1363,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1386,7 +1388,8 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
if (!new_obj) {
if (cmd->obj.chunk)
sctp_chunk_free(cmd->obj.chunk);
- goto nomem;
+ error = -ENOMEM;
+ break;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1433,8 +1436,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
/* Generate a SHUTDOWN chunk. */
new_obj = sctp_make_shutdown(asoc, chunk);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
break;
@@ -1770,11 +1775,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
break;
}
- if (error)
+ if (error) {
+ cmd = sctp_next_cmd(commands);
+ while (cmd) {
+ if (cmd->verb == SCTP_CMD_REPLY)
+ sctp_chunk_free(cmd->obj.chunk);
+ cmd = sctp_next_cmd(commands);
+ }
break;
+ }
}
-out:
/* If this is in response to a received chunk, wait until
* we are done with the packet to open the queue so that we don't
* send multiple packets in response to a single request.
@@ -1789,7 +1800,4 @@ out:
sp->data_ready_signalled = 0;
return error;
-nomem:
- error = -ENOMEM;
- goto out;
}
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 6a30392068a0..c1a100d2fed3 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,10 +84,8 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
return 0;
ret = genradix_prealloc(&stream->out, outcnt, gfp);
- if (ret) {
- genradix_free(&stream->out);
+ if (ret)
return ret;
- }
stream->outcnt = outcnt;
return 0;
@@ -102,10 +100,8 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
return 0;
ret = genradix_prealloc(&stream->in, incnt, gfp);
- if (ret) {
- genradix_free(&stream->in);
+ if (ret)
return ret;
- }
stream->incnt = incnt;
return 0;
@@ -123,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
* a new one with new outcnt to save memory if needed.
*/
if (outcnt == stream->outcnt)
- goto in;
+ goto handle_in;
/* Filter out chunks queued on streams that won't exist anymore */
sched->unsched_all(stream);
@@ -132,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
ret = sctp_stream_alloc_out(stream, outcnt, gfp);
if (ret)
- goto out;
+ goto out_err;
for (i = 0; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
-in:
+handle_in:
sctp_stream_interleave_init(stream);
if (!incnt)
goto out;
ret = sctp_stream_alloc_in(stream, incnt, gfp);
- if (ret) {
- sched->free(stream);
- genradix_free(&stream->out);
- stream->outcnt = 0;
- goto out;
- }
+ if (ret)
+ goto in_err;
+
+ goto out;
+in_err:
+ sched->free(stream);
+ genradix_free(&stream->in);
+out_err:
+ genradix_free(&stream->out);
+ stream->outcnt = 0;
out:
return ret;
}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 7235a6032671..3bbe1a58ec87 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
pf->af->from_sk(&addr, sk);
pf->to_sk_daddr(&t->ipaddr, sk);
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
pf->to_sk_daddr(&addr, sk);
dst = sctp_transport_dst_check(t);
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 11255e970dd4..ee49a9f1dd4f 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,7 +9,7 @@ tipc-y += addr.o bcast.o bearer.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o monitor.o name_table.o net.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
- topsrv.o socket.o group.o trace.o
+ topsrv.o group.o trace.o
CFLAGS_trace.o += -I$(src)
@@ -20,5 +20,3 @@ tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
obj-$(CONFIG_TIPC_DIAG) += diag.o
-
-tipc_diag-y := diag.o
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 0254bb7e418b..217516357ef2 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -204,8 +204,8 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
}
- attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
- sizeof(struct nlattr *), GFP_KERNEL);
+ attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf) {
err = -ENOMEM;
goto err_out;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 6552f986774c..f9b4fb92c0b1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -287,12 +287,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
*
* Caller must hold socket lock
*/
-static void tsk_rej_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk, int error)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
- tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
+ tipc_sk_respond(sk, skb, error);
}
static bool tipc_sk_connected(struct sock *sk)
@@ -545,34 +545,45 @@ static void __tipc_shutdown(struct socket *sock, int error)
/* Remove pending SYN */
__skb_queue_purge(&sk->sk_write_queue);
- /* Reject all unreceived messages, except on an active connection
- * (which disconnects locally & sends a 'FIN+' to peer).
- */
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- if (TIPC_SKB_CB(skb)->bytes_read) {
- kfree_skb(skb);
- continue;
- }
- if (!tipc_sk_type_connectionless(sk) &&
- sk->sk_state != TIPC_DISCONNECTING) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- }
- tipc_sk_respond(sk, skb, error);
+ /* Remove partially received buffer if any */
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
}
- if (tipc_sk_type_connectionless(sk))
+ /* Reject all unreceived messages if connectionless */
+ if (tipc_sk_type_connectionless(sk)) {
+ tsk_rej_rx_queue(sk, error);
return;
+ }
- if (sk->sk_state != TIPC_DISCONNECTING) {
+ switch (sk->sk_state) {
+ case TIPC_CONNECTING:
+ case TIPC_ESTABLISHED:
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
+ /* Send a FIN+/- to its peer */
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb) {
+ __skb_queue_purge(&sk->sk_receive_queue);
+ tipc_sk_respond(sk, skb, error);
+ break;
+ }
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ break;
+ case TIPC_LISTEN:
+ /* Reject all SYN messages */
+ tsk_rej_rx_queue(sk, error);
+ break;
+ default:
+ __skb_queue_purge(&sk->sk_receive_queue);
+ break;
}
}
@@ -2432,8 +2443,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
return sock_intr_errno(*timeo_p);
add_wait_queue(sk_sleep(sk), &wait);
- done = sk_wait_event(sk, timeo_p,
- sk->sk_state != TIPC_CONNECTING, &wait);
+ done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+ &wait);
remove_wait_queue(sk_sleep(sk), &wait);
} while (!done);
return 0;
@@ -2643,7 +2654,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
- tsk_rej_rx_queue(new_sk);
+ tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
/* Connect new socket to it's peer */
tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c
index 6d0125ca8af7..20291ec6489f 100644
--- a/samples/seccomp/user-trap.c
+++ b/samples/seccomp/user-trap.c
@@ -298,14 +298,14 @@ int main(void)
req = malloc(sizes.seccomp_notif);
if (!req)
goto out_close;
- memset(req, 0, sizeof(*req));
resp = malloc(sizes.seccomp_notif_resp);
if (!resp)
goto out_req;
- memset(resp, 0, sizeof(*resp));
+ memset(resp, 0, sizes.seccomp_notif_resp);
while (1) {
+ memset(req, 0, sizes.seccomp_notif);
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
perror("ioctl recv");
goto out_resp;
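
A condensed sketch of the listener loop that results from the user-trap.c change above (illustrative, not the full sample): the buffers are sized from SECCOMP_GET_NOTIF_SIZES because the running kernel's structs may be larger than those the program was compiled against, and req is re-zeroed before every RECV since newer kernels reject a non-zeroed seccomp_notif with EINVAL. The function name and the EPERM response are assumptions for the example.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

static int notif_loop(int listener, const struct seccomp_notif_sizes *sizes)
{
	struct seccomp_notif *req = calloc(1, sizes->seccomp_notif);
	struct seccomp_notif_resp *resp = calloc(1, sizes->seccomp_notif_resp);
	int ret = -1;

	if (!req || !resp)
		goto out;

	for (;;) {
		/* RECV requires a zeroed buffer; re-zero on every iteration */
		memset(req, 0, sizes->seccomp_notif);
		if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req))
			break;

		memset(resp, 0, sizes->seccomp_notif_resp);
		resp->id = req->id;
		resp->error = -EPERM;	/* deny the trapped syscall */
		if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, resp))
			break;
	}
	ret = 0;
out:
	free(req);
	free(resp);
	return ret;
}
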
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index d33de0b9f4f5..e3569543bdac 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS
An arch should select this symbol if it supports building with
GCC plugins.
-config GCC_PLUGINS
- bool
+menuconfig GCC_PLUGINS
+ bool "GCC plugins"
depends on HAVE_GCC_PLUGINS
depends on PLUGIN_HOSTCC != ""
default y
@@ -25,8 +25,7 @@ config GCC_PLUGINS
See Documentation/core-api/gcc-plugins.rst for details.
-menu "GCC plugins"
- depends on GCC_PLUGINS
+if GCC_PLUGINS
config GCC_PLUGIN_CYC_COMPLEXITY
bool "Compute the cyclomatic complexity of a function" if EXPERT
@@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK
bool
depends on GCC_PLUGINS && ARM
-endmenu
+endif
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 7c230016b08d..357dc56bcf30 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -136,7 +136,7 @@ mkdir -p debian/source/
echo "1.0" > debian/source/format
echo $debarch > debian/arch
-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
+extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
# Generate a simple changelog template
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 09996f2552ee..47aff8700547 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -623,7 +623,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
void __aa_bump_ns_revision(struct aa_ns *ns)
{
- ns->revision++;
+ WRITE_ONCE(ns->revision, ns->revision + 1);
wake_up_interruptible(&ns->wait);
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 9be7ccb8379e..6ceb74e0f789 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
if (!bprm || !profile->xattr_count)
return 0;
+ might_sleep();
/* transition from exec match to xattr set */
state = aa_dfa_null_transition(profile->xmatch, state);
@@ -361,10 +362,11 @@ out:
}
/**
- * __attach_match_ - find an attachment match
+ * find_attach - do attachment search for unconfined processes
* @bprm - binprm structure of transitioning task
- * @name - to match against (NOT NULL)
+ * @ns: the current namespace (NOT NULL)
* @head - profile list to walk (NOT NULL)
+ * @name - to match against (NOT NULL)
* @info - info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
@@ -374,12 +376,11 @@ out:
*
* Requires: @head not be shared or have appropriate locks held
*
- * Returns: profile or NULL if no match found
+ * Returns: label or NULL if no match found
*/
-static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
- const char *name,
- struct list_head *head,
- const char **info)
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+ struct aa_ns *ns, struct list_head *head,
+ const char *name, const char **info)
{
int candidate_len = 0, candidate_xattrs = 0;
bool conflict = false;
@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
AA_BUG(!name);
AA_BUG(!head);
+ rcu_read_lock();
+restart:
list_for_each_entry_rcu(profile, head, base.list) {
if (profile->label.flags & FLAG_NULL &&
&profile->label == ns_unconfined(profile->ns))
@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
perm = dfa_user_allow(profile->xmatch, state);
/* any accepting state means a valid match. */
if (perm & MAY_EXEC) {
- int ret;
+ int ret = 0;
if (count < candidate_len)
continue;
- ret = aa_xattrs_match(bprm, profile, state);
- /* Fail matching if the xattrs don't match */
- if (ret < 0)
- continue;
-
+ if (bprm && profile->xattr_count) {
+ long rev = READ_ONCE(ns->revision);
+
+ if (!aa_get_profile_not0(profile))
+ goto restart;
+ rcu_read_unlock();
+ ret = aa_xattrs_match(bprm, profile,
+ state);
+ rcu_read_lock();
+ aa_put_profile(profile);
+ if (rev !=
+ READ_ONCE(ns->revision))
+ /* policy changed */
+ goto restart;
+ /*
+ * Fail matching if the xattrs don't
+ * match
+ */
+ if (ret < 0)
+ continue;
+ }
/*
* TODO: allow for more flexible best match
*
@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
candidate_xattrs = ret;
conflict = false;
}
- } else if (!strcmp(profile->base.name, name))
+ } else if (!strcmp(profile->base.name, name)) {
/*
* old exact non-re match, without conditionals such
* as xattrs. no more searching required
*/
- return profile;
+ candidate = profile;
+ goto out;
+ }
}
- if (conflict) {
- *info = "conflicting profile attachments";
+ if (!candidate || conflict) {
+ if (conflict)
+ *info = "conflicting profile attachments";
+ rcu_read_unlock();
return NULL;
}
- return candidate;
-}
-
-/**
- * find_attach - do attachment search for unconfined processes
- * @bprm - binprm structure of transitioning task
- * @ns: the current namespace (NOT NULL)
- * @list: list to search (NOT NULL)
- * @name: the executable name to match against (NOT NULL)
- * @info: info message if there was an error
- *
- * Returns: label or NULL if no match found
- */
-static struct aa_label *find_attach(const struct linux_binprm *bprm,
- struct aa_ns *ns, struct list_head *list,
- const char *name, const char **info)
-{
- struct aa_profile *profile;
-
- rcu_read_lock();
- profile = aa_get_profile(__attach_match(bprm, name, list, info));
+out:
+ candidate = aa_get_newest_profile(candidate);
rcu_read_unlock();
- return profile ? &profile->label : NULL;
+ return &candidate->label;
}
static const char *next_name(int xtype, const char *name)
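
The find_attach() rework above follows a pattern worth spelling out: to call a sleeping helper while walking an RCU-protected list, take a counted reference, drop the RCU read lock for the sleeping call, then retake it and restart the walk if a published revision counter shows the list changed underneath. Below is a generic, self-contained sketch of that pattern; the struct and helper names are invented and none of this is AppArmor API.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	struct kref ref;
};

static void item_release(struct kref *k)
{
	kfree(container_of(k, struct item, ref));
}

static bool slow_match(struct item *it)
{
	might_sleep();			/* e.g. reads xattrs from disk */
	return false;			/* stub for the example */
}

/* Return the first matching item with a reference held, or NULL. */
static struct item *find_match(struct list_head *head, unsigned long *revision)
{
	struct item *it;

	rcu_read_lock();
restart:
	list_for_each_entry_rcu(it, head, list) {
		unsigned long rev = READ_ONCE(*revision);

		if (!kref_get_unless_zero(&it->ref))
			goto restart;	/* entry is on its way out */
		rcu_read_unlock();	/* slow_match() may sleep */
		if (slow_match(it))
			return it;	/* reference handed to the caller */
		rcu_read_lock();
		kref_put(&it->ref, item_release);
		if (rev != READ_ONCE(*revision))
			goto restart;	/* list changed while unlocked */
	}
	rcu_read_unlock();
	return NULL;
}
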
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index fe2ebe5e865e..f1caf3674e1c 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -618,8 +618,7 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
fctx = file_ctx(file);
rcu_read_lock();
- flabel = aa_get_newest_label(rcu_dereference(fctx->label));
- rcu_read_unlock();
+ flabel = rcu_dereference(fctx->label);
AA_BUG(!flabel);
/* revalidate access, if task is unconfined, or the cached cred
@@ -631,9 +630,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
*/
denied = request & ~fctx->allow;
if (unconfined(label) || unconfined(flabel) ||
- (!denied && aa_label_is_subset(flabel, label)))
+ (!denied && aa_label_is_subset(flabel, label))) {
+ rcu_read_unlock();
goto done;
+ }
+ flabel = aa_get_newest_label(flabel);
+ rcu_read_unlock();
/* TODO: label cross check */
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
@@ -643,8 +646,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
else if (S_ISSOCK(file_inode(file)->i_mode))
error = __file_sock_perm(op, label, flabel, file, request,
denied);
-done:
aa_put_label(flabel);
+
+done:
return error;
}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 4ed6688f9d40..e0828ee7a345 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -442,7 +442,7 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
buffer = aa_get_buffer(false);
old_buffer = aa_get_buffer(false);
error = -ENOMEM;
- if (!buffer || old_buffer)
+ if (!buffer || !old_buffer)
goto out;
error = fn_for_each_confined(label, profile,
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 03104830c913..269f2f53c0b1 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1125,8 +1125,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
mutex_lock_nested(&ns->parent->lock, ns->level);
- __aa_remove_ns(ns);
__aa_bump_ns_revision(ns);
+ __aa_remove_ns(ns);
mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
@@ -1138,9 +1138,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
goto fail_ns_lock;
}
name = profile->base.hname;
+ __aa_bump_ns_revision(ns);
__remove_profile(profile);
__aa_labelset_update_subtree(ns);
- __aa_bump_ns_revision(ns);
mutex_unlock(&ns->lock);
}
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index dd3d5942e669..c36bafbcd77e 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
exe = tomoyo_get_exe();
if (!exe)
return false;
- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (!ptr->head.is_deleted &&
(!tomoyo_pathcmp(domainname, ptr->manager) ||
!strcmp(exe, ptr->manager->name))) {
@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -EINTR;
/* Is there an active domain? */
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void)
tomoyo_policy_loaded = true;
pr_info("TOMOYO: 2.6.0\n");
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
const u8 profile = domain->profile;
struct tomoyo_policy_namespace *ns = domain->ns;
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 8526a0a74023..7869d6a9980b 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
- list_for_each_entry_rcu(entry, list, list) {
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!check_duplicate(entry, new_entry))
@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
}
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- list_for_each_entry_rcu(entry, list, list) {
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
u16 i = 0;
retry:
- list_for_each_entry_rcu(ptr, list, list) {
+ list_for_each_entry_rcu(ptr, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->is_deleted || ptr->type != r->param_type)
continue;
if (!check_entry(r, ptr))
@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
{
const struct tomoyo_transition_control *ptr;
- list_for_each_entry_rcu(ptr, list, head.list) {
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->head.is_deleted || ptr->type != type)
continue;
if (ptr->domainname) {
@@ -735,7 +739,8 @@ retry:
/* Check 'aggregator' directive. */
candidate = &exename;
- list_for_each_entry_rcu(ptr, list, head.list) {
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->head.is_deleted ||
!tomoyo_path_matches_pattern(&exename,
ptr->original_name))
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index a37c7dc66e44..1cecdd797597 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
{
struct tomoyo_path_group *member;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (!tomoyo_path_matches_pattern(pathname, member->member_name))
@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
struct tomoyo_number_group *member;
bool matched = false;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (min > member->number.values[1] ||
@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
bool matched = false;
const u8 size = is_ipv6 ? 16 : 4;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (member->address.is_ipv6 != is_ipv6)
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index e7832448d721..bf38fc1b59b2 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -218,31 +218,6 @@ out:
}
/**
- * tomoyo_get_socket_name - Get the name of a socket.
- *
- * @path: Pointer to "struct path".
- * @buffer: Pointer to buffer to return value in.
- * @buflen: Sizeof @buffer.
- *
- * Returns the buffer.
- */
-static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
- const int buflen)
-{
- struct inode *inode = d_backing_inode(path->dentry);
- struct socket *sock = inode ? SOCKET_I(inode) : NULL;
- struct sock *sk = sock ? sock->sk : NULL;
-
- if (sk) {
- snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
- sk->sk_family, sk->sk_type, sk->sk_protocol);
- } else {
- snprintf(buffer, buflen, "socket:[unknown]");
- }
- return buffer;
-}
-
-/**
* tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
@@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
break;
/* To make sure that pos is '\0' terminated. */
buf[buf_len - 1] = '\0';
- /* Get better name for socket. */
- if (sb->s_magic == SOCKFS_MAGIC) {
- pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
- goto encode;
- }
- /* For "pipe:[\$]". */
+ /* For "pipe:[\$]" and "socket:[\$]". */
if (dentry->d_op && dentry->d_op->d_dname) {
pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
goto encode;
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 52752e1a84ed..eba0b3395851 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
return false;
if (!domain)
return true;
- list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
u16 perm;
u8 i;
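
All of the TOMOYO hunks above add the same thing: the optional fourth argument to list_for_each_entry_rcu(), a lockdep condition. TOMOYO's lists are read under SRCU rather than plain rcu_read_lock(), so without the srcu_read_lock_held() hint CONFIG_PROVE_RCU would emit false warnings. A minimal stand-alone sketch of that usage, with invented names:

#include <linux/rculist.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);

struct entry {
	struct list_head list;
	int value;
};

static int sum_entries(struct list_head *head)
{
	struct entry *e;
	int idx, sum = 0;

	idx = srcu_read_lock(&example_srcu);
	list_for_each_entry_rcu(e, head, list,
				srcu_read_lock_held(&example_srcu))
		sum += e->value;
	srcu_read_unlock(&example_srcu, idx);
	return sum;
}
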
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 906b1e20bae0..286361ecd640 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = {
.reg_write = hda_reg_write,
.use_single_read = true,
.use_single_write = true,
- .disable_locking = true,
};
/**
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b856b89378ac..5b92f290cbb0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -282,12 +282,13 @@ enum {
/* quirks for old Intel chipsets */
#define AZX_DCAPS_INTEL_ICH \
- (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE)
+ (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\
+ AZX_DCAPS_SYNC_WRITE)
/* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_BASE \
(AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
- AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
/* PCH up to IVB; no runtime PM; bind with i915 gfx */
#define AZX_DCAPS_INTEL_PCH_NOPM \
@@ -302,13 +303,13 @@ enum {
#define AZX_DCAPS_INTEL_HASWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
/* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
#define AZX_DCAPS_INTEL_BROADWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
#define AZX_DCAPS_INTEL_BAYTRAIL \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
@@ -1410,7 +1411,17 @@ static bool atpx_present(void)
acpi_handle dhandle, atpx_handle;
acpi_status status;
- while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (dhandle) {
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+ if (!ACPI_FAILURE(status)) {
+ pci_dev_put(pdev);
+ return true;
+ }
+ }
+ }
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev);
if (dhandle) {
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dbfafee97931..f2ea3528bfb1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -412,6 +412,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0672:
alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
break;
+ case 0x10ec0222:
case 0x10ec0623:
alc_update_coef_idx(codec, 0x19, 1<<13, 0);
break;
@@ -430,6 +431,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0899:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1168:
case 0x10ec1220:
alc_update_coef_idx(codec, 0x7, 1<<1, 0);
@@ -501,6 +503,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
switch (codec->core.vendor_id) {
+ case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
case 0x10ec0298:
@@ -2525,6 +2528,7 @@ static int patch_alc882(struct hda_codec *codec)
case 0x10ec0882:
case 0x10ec0885:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1220:
break;
default:
@@ -5904,9 +5908,12 @@ enum {
ALC256_FIXUP_ASUS_HEADSET_MIC,
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
- ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
- ALC294_FIXUP_ASUS_INTSPK_GPIO,
+ ALC289_FIXUP_DELL_SPK2,
+ ALC289_FIXUP_DUAL_SPK,
+ ALC294_FIXUP_SPK2_TO_DAC1,
+ ALC294_FIXUP_ASUS_DUAL_SPK,
+
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6981,33 +6988,45 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
}
},
- [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
- { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+ { 0x19, 0x04a11040 },
+ { 0x21, 0x04211020 },
{ }
},
.chained = true,
- .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
- [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+ [ALC289_FIXUP_DELL_SPK2] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- { 0x19, 0x04a11040 },
- { 0x21, 0x04211020 },
+ { 0x17, 0x90170130 }, /* bass spk */
{ }
},
.chained = true,
- .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
},
- [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
+ [ALC289_FIXUP_DUAL_SPK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC289_FIXUP_DELL_SPK2
+ },
+ [ALC294_FIXUP_SPK2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
+ [ALC294_FIXUP_ASUS_DUAL_SPK] = {
.type = HDA_FIXUP_FUNC,
/* The GPIO must be pulled to initialize the AMP */
.v.func = alc_fixup_gpio4,
.chained = true,
- .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
+ .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
+
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7080,6 +7099,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7167,7 +7188,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
@@ -7239,6 +7260,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -9237,6 +9259,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+ HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
{} /* terminator */
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index c80a16ee6e76..242542e23d28 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
unsigned long flags;
unsigned char mclk_change;
unsigned int i, old_rate;
+ bool call_set_rate = false;
if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
return -EINVAL;
@@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
* setting clock rate for internal clock mode */
old_rate = ice->get_rate(ice);
if (force || (old_rate != rate))
- ice->set_rate(ice, rate);
+ call_set_rate = true;
else if (rate == ice->cur_rate) {
spin_unlock_irqrestore(&ice->reg_lock, flags);
return 0;
@@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
}
ice->cur_rate = rate;
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+
+ if (call_set_rate)
+ ice->set_rate(ice, rate);
/* setting master clock */
mclk_change = ice->set_mclk(ice, rate);
- spin_unlock_irqrestore(&ice->reg_lock, flags);
-
if (mclk_change && ice->gpio.i2s_mclk_changed)
ice->gpio.i2s_mclk_changed(ice);
if (ice->gpio.set_pro_rate)
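
The ice1724 change above stops calling ice->set_rate() with ice->reg_lock held: on some cards that callback can sleep, so the decision is now recorded under the lock and the call is made only after the lock is dropped. A minimal sketch of the flag-and-defer pattern, with a made-up struct standing in for the real driver state:

#include <linux/spinlock.h>
#include <linux/types.h>

struct dev_sketch {
	spinlock_t reg_lock;
	unsigned int cur_rate;
	void (*set_rate)(struct dev_sketch *dev, unsigned int rate);
};

static void set_rate_sketch(struct dev_sketch *dev, unsigned int rate, int force)
{
	unsigned long flags;
	bool call_set_rate = false;

	spin_lock_irqsave(&dev->reg_lock, flags);
	if (force || dev->cur_rate != rate)
		call_set_rate = true;		/* only record the decision here */
	dev->cur_rate = rate;
	spin_unlock_irqrestore(&dev->reg_lock, flags);

	if (call_set_rate)
		dev->set_rate(dev, rate);	/* may sleep: now safe to call */
}
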
diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
index a1db1bce330f..5faecbeb5497 100644
--- a/sound/soc/fsl/fsl_audmix.c
+++ b/sound/soc/fsl/fsl_audmix.c
@@ -505,15 +505,20 @@ static int fsl_audmix_probe(struct platform_device *pdev)
ARRAY_SIZE(fsl_audmix_dai));
if (ret) {
dev_err(dev, "failed to register ASoC DAI\n");
- return ret;
+ goto err_disable_pm;
}
priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0);
if (IS_ERR(priv->pdev)) {
ret = PTR_ERR(priv->pdev);
dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret);
+ goto err_disable_pm;
}
+ return 0;
+
+err_disable_pm:
+ pm_runtime_disable(dev);
return ret;
}
@@ -521,6 +526,8 @@ static int fsl_audmix_remove(struct platform_device *pdev)
{
struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
if (priv->pdev)
platform_device_unregister(priv->pdev);
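
fsl_audmix_probe() enables runtime PM early, so the hunks above route every later failure through a single label that disables it again (and remove() now does the same); otherwise an unbalanced pm_runtime_enable() is left behind. A minimal sketch of the unwind shape, with register_things() as a placeholder for the real registrations:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int register_things(struct device *dev);	/* placeholder helper */

static int probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_enable(dev);

	ret = register_things(dev);	/* DAIs, child devices, ... */
	if (ret)
		goto err_disable_pm;

	return 0;

err_disable_pm:
	pm_runtime_disable(dev);	/* undo what probe enabled */
	return ret;
}
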
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index a22f97234201..5f1bf6d3800c 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/dmi.h>
#include <linux/slab.h>
-#include <asm/cpu_device_id.h>
#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/jack.h>
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c84ff1a5bf9..8ef0efeed0a7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -479,6 +479,12 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
goto free_rtd;
rtd->dev = dev;
+ INIT_LIST_HEAD(&rtd->list);
+ INIT_LIST_HEAD(&rtd->component_list);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
dev_set_drvdata(dev, rtd);
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
@@ -494,12 +500,6 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
/*
* rtd remaining settings
*/
- INIT_LIST_HEAD(&rtd->component_list);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
-
rtd->card = card;
rtd->dai_link = dai_link;
if (!rtd->dai_link->ops)
@@ -1871,6 +1871,8 @@ match:
/* convert non BE into BE */
dai_link->no_pcm = 1;
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
/* override any BE fixups */
dai_link->be_hw_params_fixup =
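
The soc-core hunks do two things: they move all INIT_LIST_HEAD() calls to the top of soc_new_pcm_runtime(), so an early failure that frees the runtime never walks uninitialized list heads, and they mark links converted to back ends as DPCM playback and capture capable. A minimal sketch of the first rule, with a reduced structure rather than the real snd_soc_pcm_runtime:

#include <linux/list.h>
#include <linux/slab.h>

struct rtd_sketch {
	struct list_head list;
	struct list_head component_list;
};

static struct rtd_sketch *new_rtd_sketch(void)
{
	struct rtd_sketch *rtd = kzalloc(sizeof(*rtd), GFP_KERNEL);

	if (!rtd)
		return NULL;

	/* Valid (empty) list heads before anything below can fail and free rtd. */
	INIT_LIST_HEAD(&rtd->list);
	INIT_LIST_HEAD(&rtd->component_list);

	/* ... device registration and other steps that may error out ... */
	return rtd;
}
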
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index b28613149b0c..92e4f4d08bfa 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -548,12 +548,12 @@ static void remove_link(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->link_unload)
dobj->ops->link_unload(comp, dobj);
+ list_del(&dobj->list);
+ snd_soc_remove_dai_link(comp->card, link);
+
kfree(link->name);
kfree(link->stream_name);
kfree(link->cpus->dai_name);
-
- list_del(&dobj->list);
- snd_soc_remove_dai_link(comp->card, link);
kfree(link);
}
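
remove_link() used to free the link's name strings and only then unregister the DAI link, which still references them; the hunk reorders the teardown so the object is unlinked and unregistered before anything it owns is freed. A reduced sketch of the ordering, with unregister_link() standing in for snd_soc_remove_dai_link():

#include <linux/list.h>
#include <linux/slab.h>

struct link_sketch {
	struct list_head node;
	char *name;
};

static void unregister_link(struct link_sketch *link);	/* placeholder */

static void remove_link_sketch(struct link_sketch *link)
{
	list_del(&link->node);		/* make it unreachable first */
	unregister_link(link);		/* may still look at link->name */

	kfree(link->name);		/* only now free what it owns */
	kfree(link);
}
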
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index cfefcfd92798..aef6ca167b9c 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev)
priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
sizeof(*priv->pd_dev), GFP_KERNEL);
- if (!priv)
+ if (!priv->pd_dev)
return -ENOMEM;
priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
@@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev)
}
sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
return 0;
exit_pdev_unregister:
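
The first imx8 hunk fixes a copy-paste slip — the allocation stored in priv->pd_dev was being checked through priv, which is already known to be valid — and the second seeds sdev->dsp_box.offset so the firmware-ready message can be read before the firmware reports its own offsets. The corrected allocation check, shown in isolation (an excerpt, not standalone code):

priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
				  sizeof(*priv->pd_dev), GFP_KERNEL);
if (!priv->pd_dev)	/* check the pointer the allocator just returned */
	return -ENOMEM;
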
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 8796f385be76..896d21984b73 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -216,6 +216,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
link_dev = hda_link_stream_assign(bus, substream);
if (!link_dev)
return -EBUSY;
+
+ snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
}
stream_tag = hdac_stream(link_dev)->stream_tag;
@@ -228,8 +230,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
-
link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -361,6 +361,13 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream,
bus = hstream->bus;
rtd = snd_pcm_substream_chip(substream);
link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!link_dev) {
+ dev_dbg(dai->dev,
+ "%s: link_dev is not assigned\n", __func__);
+ return -EINVAL;
+ }
+
hda_stream = hstream_to_sof_hda_stream(link_dev);
/* free the link DMA channel in the FW */
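
hda_link_hw_params() now attaches the freshly assigned link_dev via snd_soc_dai_set_dma_data() as soon as it exists, and hda_link_hw_free() bails out when no DMA data was ever attached — without that guard, a skipped or failed hw_params followed by hw_free dereferences a NULL stream. A minimal sketch of the guard (includes and the actual teardown omitted from the real function):

#include <sound/hdaudio_ext.h>
#include <sound/soc.h>

static int hw_free_sketch(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct hdac_ext_stream *link_dev;

	link_dev = snd_soc_dai_get_dma_data(dai, substream);
	if (!link_dev) {
		dev_dbg(dai->dev, "%s: link_dev is not assigned\n", __func__);
		return -EINVAL;	/* nothing was set up, nothing to free */
	}

	/* ... release the link DMA channel and the stream from here on ... */
	return 0;
}
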
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index 5994e1073364..5fdfbaa8c4ed 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -826,6 +826,9 @@ void snd_sof_ipc_free(struct snd_sof_dev *sdev)
{
struct snd_sof_ipc *ipc = sdev->ipc;
+ if (!ipc)
+ return;
+
/* disable sending of ipc's */
mutex_lock(&ipc->tx_mutex);
ipc->disable_ipc_tx = true;
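
snd_sof_ipc_free() can be reached from error paths where the IPC layer was never created, so the early return above keeps it from locking a mutex inside a NULL structure. The same "tolerate free-before-init" guard in miniature, with a stand-in type:

#include <linux/mutex.h>
#include <linux/types.h>

struct ipc_sketch {
	struct mutex tx_mutex;
	bool disable_tx;
};

static void ipc_free_sketch(struct ipc_sketch *ipc)
{
	if (!ipc)		/* probe may have failed before IPC init */
		return;

	mutex_lock(&ipc->tx_mutex);
	ipc->disable_tx = true;
	mutex_unlock(&ipc->tx_mutex);
}
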
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index 3fd28ee01675..3769d9ce5dbe 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -220,6 +219,7 @@
* @slave_config: dma slave channel runtime config pointer
* @phys_addr: SPDIFRX registers physical base address
* @lock: synchronization enabling lock
+ * @irq_lock: prevent race condition with IRQ on stream state
* @cs: channel status buffer
* @ub: user data buffer
* @irq: SPDIFRX interrupt line
@@ -240,6 +240,7 @@ struct stm32_spdifrx_data {
struct dma_slave_config slave_config;
dma_addr_t phys_addr;
spinlock_t lock; /* Sync enabling lock */
+ spinlock_t irq_lock; /* Prevent race condition on stream state */
unsigned char cs[SPDIFRX_CS_BYTES_NB];
unsigned char ub[SPDIFRX_UB_BYTES_NB];
int irq;
@@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx)
static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
{
int cr, cr_mask, imr, ret;
+ unsigned long flags;
/* Enable IRQs */
imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
@@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
if (ret)
return ret;
- spin_lock(&spdifrx->lock);
+ spin_lock_irqsave(&spdifrx->lock, flags);
spdifrx->refcount++;
@@ -362,7 +364,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
"Failed to start synchronization\n");
}
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
return ret;
}
@@ -370,11 +372,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
{
int cr, cr_mask, reg;
+ unsigned long flags;
- spin_lock(&spdifrx->lock);
+ spin_lock_irqsave(&spdifrx->lock, flags);
if (--spdifrx->refcount) {
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
return;
}
@@ -393,7 +396,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, &reg);
regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, &reg);
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
}
static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
@@ -480,8 +483,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
- pinctrl_pm_select_default_state(&spdifrx->pdev->dev);
-
ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
if (ret < 0)
return ret;
@@ -513,7 +514,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
end:
clk_disable_unprepare(spdifrx->kclk);
- pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev);
return ret;
}
@@ -665,7 +665,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = {
static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
{
struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
- struct snd_pcm_substream *substream = spdifrx->substream;
struct platform_device *pdev = spdifrx->pdev;
unsigned int cr, mask, sr, imr;
unsigned int flags, sync_state;
@@ -745,14 +744,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
return IRQ_HANDLED;
}
- if (substream)
- snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+ spin_lock(&spdifrx->irq_lock);
+ if (spdifrx->substream)
+ snd_pcm_stop(spdifrx->substream,
+ SNDRV_PCM_STATE_DISCONNECTED);
+ spin_unlock(&spdifrx->irq_lock);
return IRQ_HANDLED;
}
- if (err_xrun && substream)
- snd_pcm_stop_xrun(substream);
+ spin_lock(&spdifrx->irq_lock);
+ if (err_xrun && spdifrx->substream)
+ snd_pcm_stop_xrun(spdifrx->substream);
+ spin_unlock(&spdifrx->irq_lock);
return IRQ_HANDLED;
}
@@ -761,9 +765,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&spdifrx->irq_lock, flags);
spdifrx->substream = substream;
+ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
ret = clk_prepare_enable(spdifrx->kclk);
if (ret)
@@ -839,8 +846,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
+ spin_lock_irqsave(&spdifrx->irq_lock, flags);
spdifrx->substream = NULL;
+ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
+
clk_disable_unprepare(spdifrx->kclk);
}
@@ -944,6 +955,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
spdifrx->pdev = pdev;
init_completion(&spdifrx->cs_completion);
spin_lock_init(&spdifrx->lock);
+ spin_lock_init(&spdifrx->irq_lock);
platform_set_drvdata(pdev, spdifrx);
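
The SPDIFRX hunks close two races: a new irq_lock makes the interrupt handler and the startup/shutdown callbacks agree on whether spdifrx->substream is still valid, and the existing sync lock moves to the irqsave variants so it can be taken safely regardless of interrupt state. A reduced sketch of the substream handshake, with simplified types in place of the real driver data:

#include <linux/spinlock.h>
#include <sound/pcm.h>

struct rx_sketch {
	spinlock_t irq_lock;			/* guards .substream */
	struct snd_pcm_substream *substream;
};

/* Process context: publish (startup) or retract (shutdown) the substream. */
static void set_substream_sketch(struct rx_sketch *rx, struct snd_pcm_substream *s)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->irq_lock, flags);
	rx->substream = s;			/* NULL on shutdown */
	spin_unlock_irqrestore(&rx->irq_lock, flags);
}

/* Interrupt context: never touch the substream outside the lock. */
static void isr_xrun_sketch(struct rx_sketch *rx)
{
	spin_lock(&rx->irq_lock);
	if (rx->substream)
		snd_pcm_stop_xrun(rx->substream);
	spin_unlock(&rx->irq_lock);
}
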
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 2991b9986f66..395403a2d33f 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -145,6 +145,7 @@ struct snd_usb_substream {
struct snd_usb_endpoint *sync_endpoint;
unsigned long flags;
bool need_setup_ep; /* (re)configure EP at prepare? */
+ bool need_setup_fmt; /* (re)configure fmt after resume? */
unsigned int speed; /* USB_SPEED_XXX */
u64 formats; /* format bitmasks (all or'ed) */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 9c8930bb00c8..a11c8150af58 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
if (WARN_ON(!iface))
return -EINVAL;
alts = usb_altnum_to_altsetting(iface, fmt->altsetting);
- altsd = get_iface_desc(alts);
- if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
+ if (WARN_ON(!alts))
return -EINVAL;
+ altsd = get_iface_desc(alts);
- if (fmt == subs->cur_audiofmt)
+ if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt)
return 0;
/* close the old interface */
- if (subs->interface >= 0 && subs->interface != fmt->iface) {
+ if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) {
if (!subs->stream->chip->keep_iface) {
err = usb_set_interface(subs->dev, subs->interface, 0);
if (err < 0) {
@@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
subs->altset_idx = 0;
}
+ if (subs->need_setup_fmt)
+ subs->need_setup_fmt = false;
+
/* set interface */
if (iface->cur_altsetting != alts) {
err = snd_usb_select_mode_quirk(subs, fmt);
@@ -1728,6 +1731,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
subs->data_endpoint->retire_data_urb = retire_playback_urb;
subs->running = 0;
return 0;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+ stop_endpoints(subs, true);
+ subs->need_setup_fmt = true;
+ return 0;
+ }
+ break;
}
return -EINVAL;
@@ -1760,6 +1770,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
subs->data_endpoint->retire_data_urb = retire_capture_urb;
subs->running = 1;
return 0;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+ stop_endpoints(subs, true);
+ subs->need_setup_fmt = true;
+ return 0;
+ }
+ break;
}
return -EINVAL;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 70c338f3ae24..d187aa6d50db 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.vendor_name = "Dell",
.product_name = "WD19 Dock",
.profile_name = "Dell-WD15-Dock",
- .ifnum = QUIRK_NO_INTERFACE
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_SETUP_FMT_AFTER_RESUME
}
},
/* MOTU Microbook II */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 349e1e52996d..82184036437b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
return snd_usb_create_mixer(chip, quirk->ifnum, 0);
}
+
+static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ chip->setup_fmt_after_resume_quirk = 1;
+ return 1; /* Continue with creating streams and mixer */
+}
+
/*
* audio-interface quirks
*
@@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
[QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
[QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
+ [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
};
if (quirk->type < QUIRK_TYPE_COUNT) {
@@ -1386,6 +1397,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index ff3cbf653de8..6fe3ab582ec6 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -33,7 +33,7 @@ struct snd_usb_audio {
wait_queue_head_t shutdown_wait;
unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
unsigned int tx_length_quirk:1; /* Put length specifier in transfers */
-
+ unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */
int num_interfaces;
int num_suspended_intf;
int sample_rate_read_error;
@@ -98,6 +98,7 @@ enum quirk_type {
QUIRK_AUDIO_EDIROL_UAXX,
QUIRK_AUDIO_ALIGN_TRANSFER,
QUIRK_AUDIO_STANDARD_MIXER,
+ QUIRK_SETUP_FMT_AFTER_RESUME,
QUIRK_TYPE_COUNT
};
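
Taken together, the USB-audio hunks add a per-device quirk, QUIRK_SETUP_FMT_AFTER_RESUME: the quirk handler sets a chip flag, a suspend trigger stops the endpoints and latches need_setup_fmt, and set_format() then refuses to short-circuit even when the requested format equals the current one, forcing the altsetting to be re-selected after resume (what the Dell WD19 dock needs). A self-contained model of that flag's lifecycle, with stand-in types rather than the real driver structures:

#include <stdbool.h>
#include <stdio.h>

struct stream_model {
	bool quirk_enabled;	/* set once per device from its quirk entry */
	bool need_setup_fmt;	/* latched on suspend, consumed by set_format */
	bool format_applied;
};

static void trigger_suspend(struct stream_model *s)
{
	if (s->quirk_enabled)
		s->need_setup_fmt = true;	/* force a full setup on next use */
}

static void set_format(struct stream_model *s)
{
	if (s->format_applied && !s->need_setup_fmt)
		return;				/* normal fast path */
	/* ... re-select the USB altsetting here ... */
	s->need_setup_fmt = false;
	s->format_applied = true;
}

int main(void)
{
	struct stream_model s = { .quirk_enabled = true };

	set_format(&s);		/* initial setup */
	trigger_suspend(&s);	/* suspend with the quirk active */
	set_format(&s);		/* resume path runs the full setup again */
	printf("need_setup_fmt=%d\n", s.need_setup_fmt);
	return 0;
}
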
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index defae23a0169..97830e46d1a0 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -138,6 +138,7 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/
BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
VERSION_SCRIPT := libbpf.map
+BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -159,7 +160,7 @@ all: fixdep
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_SHARED): force elfdep bpfdep $(BPF_HELPER_DEFS)
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -177,12 +178,12 @@ $(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
-$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_STATIC): force elfdep bpfdep $(BPF_HELPER_DEFS)
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
-bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h
+$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
$(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
- --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h
+ --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
@@ -243,7 +244,7 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
-install_headers: bpf_helper_defs.h
+install_headers: $(BPF_HELPER_DEFS)
$(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
@@ -251,7 +252,7 @@ install_headers: bpf_helper_defs.h
$(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
$(call do_install,xsk.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
+ $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \
$(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
@@ -271,7 +272,7 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
*.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
- *.pc LIBBPF-CFLAGS bpf_helper_defs.h \
+ *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
$(SHARED_OBJDIR) $(STATIC_OBJDIR)
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 419652458da4..1ff0a9f49c01 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -40,3 +40,4 @@ xdping
test_cpp
/no_alu32
/bpf_gcc
+bpf_helper_defs.h
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e0fe01d9ec33..e2fd6f8d579c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -120,9 +120,9 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
-$(BPFDIR)/bpf_helper_defs.h:
- $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h
+BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
+$(OUTPUT)/bpf_helper_defs.h:
+ $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh
index 6e4626ae71b0..8f4057310b5b 100755
--- a/tools/testing/selftests/net/forwarding/loopback.sh
+++ b/tools/testing/selftests/net/forwarding/loopback.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
ALL_TESTS="loopback_test"
NUM_NETIFS=2
source tc_common.sh
@@ -72,6 +75,11 @@ setup_prepare()
h1_create
h2_create
+
+ if ethtool -k $h1 | grep loopback | grep -q fixed; then
+ log_test "SKIP: dev $h1 does not support loopback feature"
+ exit $ksft_skip
+ fi
}
cleanup()
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index 16571ac1dab4..d3e0809ab368 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -226,17 +226,19 @@ check_transfer()
return 0
}
-test_tcp_forwarding()
+test_tcp_forwarding_ip()
{
local nsa=$1
local nsb=$2
+ local dstip=$3
+ local dstport=$4
local lret=0
ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
lpid=$!
sleep 1
- ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" &
+ ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" &
cpid=$!
sleep 3
@@ -258,6 +260,28 @@ test_tcp_forwarding()
return $lret
}
+test_tcp_forwarding()
+{
+ test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+
+ return $?
+}
+
+test_tcp_forwarding_nat()
+{
+ local lret
+
+ test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+ lret=$?
+
+ if [ $lret -eq 0 ] ; then
+ test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
+ lret=$?
+ fi
+
+ return $lret
+}
+
make_file "$ns1in" "ns1"
make_file "$ns2in" "ns2"
@@ -283,14 +307,19 @@ ip -net ns2 route add 192.168.10.1 via 10.0.2.1
# Same, but with NAT enabled.
ip netns exec nsr1 nft -f - <<EOF
table ip nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif "veth0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345
+ }
+
chain postrouting {
type nat hook postrouting priority 0; policy accept;
- meta oifname "veth1" masquerade
+ meta oifname "veth1" counter masquerade
}
}
EOF
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
if [ $? -eq 0 ] ;then
echo "PASS: flow offloaded for ns1/ns2 with NAT"
@@ -313,7 +342,7 @@ fi
ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
if [ $? -eq 0 ] ;then
echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
else
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 6944b898bb53..ee1b727ede04 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -3158,7 +3158,18 @@ TEST(user_notification_basic)
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLIN);
- EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+ /* Test that we can't pass garbage to the kernel. */
+ memset(&req, 0, sizeof(req));
+ req.pid = -1;
+ errno = 0;
+ ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EINVAL, errno);
+
+ if (ret) {
+ req.pid = 0;
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+ }
pollfd.fd = listener;
pollfd.events = POLLIN | POLLOUT;
@@ -3278,6 +3289,7 @@ TEST(user_notification_signal)
close(sk_pair[1]);
+ memset(&req, 0, sizeof(req));
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(kill(pid, SIGUSR1), 0);
@@ -3296,6 +3308,7 @@ TEST(user_notification_signal)
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
EXPECT_EQ(errno, ENOENT);
+ memset(&req, 0, sizeof(req));
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
resp.id = req.id;
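
The selftest changes track a new kernel rule: SECCOMP_IOCTL_NOTIF_RECV rejects a seccomp_notif that is not zeroed (EINVAL), so the test first proves that stale contents are refused and every real receive now clears the structure up front. A minimal sketch of the calling convention, assuming listener is an already-created notification fd:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* Hand the kernel a zeroed seccomp_notif; non-zero contents now yield EINVAL. */
static int recv_notification(int listener, struct seccomp_notif *req)
{
	memset(req, 0, sizeof(*req));
	return ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req);
}
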
diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh
index 0aad760fcd8c..2bbac73e6477 100755
--- a/usr/gen_initramfs_list.sh
+++ b/usr/gen_initramfs_list.sh
@@ -128,7 +128,7 @@ parse() {
str="${ftype} ${name} ${location} ${str}"
;;
"nod")
- local dev=`LC_ALL=C ls -l "${location}"`
+ local dev="`LC_ALL=C ls -l "${location}"`"
local maj=`field 5 ${dev}`
local min=`field 6 ${dev}`
maj=${maj%,}